标签:finally 如何 set 就是 arch save led god commit
缓存算是持久化的一个子集,但缓存又有自己的过期策略和缓存级别,而持久化基本没有过期策略之说。缓存与持久化并不是 Python 爬虫特有的,其他语言也都有涉及。下面我们把缓存和持久化放在一起讨论,是建立在持久化缓存的基础上;多级缓存策略中的内存缓存等不在本篇的讨论范畴。
Python常见本地磁盘文件型数据持久化主要包括普通文件、DBM文件、Pickle序列化对象存储、shelve键值序列化对象存储,对于我们编写爬虫程序来说缓存的设计或者持久化方式我们可以自己依据自己的需求进行合适的评估选择,下面给出常见的本地磁盘文件型持久化样例
import dbm
import pickle
import shelve

'''
Python 3 demos of common local disk-file persistence / caching mechanisms:
plain files, DBM key-value files, pickle serialization, and shelve.
'''


class NormalFilePersistence(object):
    '''Persistence (or cache) backed by a plain text file.'''

    def save(self, data):
        """Write string `data` to the backing file, replacing previous content."""
        with open('NormalFilePersistence.txt', 'w') as open_file:
            open_file.write(data)

    def load(self):
        """Return the full contents of the backing file as a string."""
        with open('NormalFilePersistence.txt', 'r') as open_file:
            return open_file.read()


class DBMPersistence(object):
    '''Persistence (or cache) of string key-value pairs via a DBM file.'''

    def save(self, key, value):
        """Store str(value) under `key`, creating the DBM file if needed."""
        # Open *before* the try block: the original opened inside `try`, so a
        # failing dbm.open() raised NameError in `finally` and masked the real error.
        dbm_file = dbm.open('DBMPersistence', 'c')
        try:
            dbm_file[key] = str(value)
        finally:
            dbm_file.close()

    def load(self, key):
        """Return the stored value for `key` (bytes), or None if absent."""
        dbm_file = dbm.open('DBMPersistence', 'r')
        try:
            result = dbm_file[key] if key in dbm_file else None
        finally:
            dbm_file.close()
        return result


class PicklePersistence(object):
    '''Persistence (or cache) of a complex object serialized with pickle.'''

    def save(self, obj):
        """Serialize `obj` into the backing file, replacing previous content."""
        with open('PicklePersistence', 'wb') as pickle_file:
            pickle.dump(obj, pickle_file)

    def load(self):
        """Deserialize and return the object stored in the backing file."""
        with open('PicklePersistence', 'rb') as pickle_file:
            return pickle.load(pickle_file)


class ShelvePersistence(object):
    '''Shelve combines DBM and pickle: key-value persistence of complex objects.'''

    def save(self, key, obj):
        """Store the picklable `obj` under `key`."""
        # shelve.Shelf is a context manager (Python 3.4+), which guarantees
        # close() even on error — the original's try/finally had the same
        # NameError-masking flaw as DBMPersistence.
        with shelve.open('ShelvePersistence') as shelve_file:
            shelve_file[key] = obj

    def load(self, key):
        """Return the object stored under `key`, or None if absent."""
        with shelve.open('ShelvePersistence') as shelve_file:
            return shelve_file[key] if key in shelve_file else None


if __name__ == '__main__':
    t_normal = NormalFilePersistence()
    t_normal.save('Test NormalFilePersistence')
    print('NormalFilePersistence load: ' + t_normal.load())

    t_dbm = DBMPersistence()
    t_dbm.save('user', 'GJRS')
    t_dbm.save('age', 27)
    print('DBMPersistence load: ' + str(t_dbm.load('user')))
    print('DBMPersistence load: ' + str(t_dbm.load('address')))

    t_pickle = PicklePersistence()
    obj = {'name': 'GJRS', 'age': 27, 'skills': ['Android', 'C', 'Python', 'Web']}
    t_pickle.save(obj)
    print('PicklePersistence load: ' + str(t_pickle.load()))

    t_shelve = ShelvePersistence()
    obj1 = {'name': 'WL', 'age': 27, 'skills': ['Test', 'AutoTest']}
    obj2 = {'name': 'GJRS', 'age': 27, 'skills': ['Android', 'C', 'Python', 'Web']}
    t_shelve.save('obj1', obj1)
    t_shelve.save('obj2', obj2)
    print('ShelvePersistence load: ' + str(t_shelve.load('obj1')))
    print('ShelvePersistence load: ' + str(t_shelve.load('objn')))
目的:提高缓存效率,减少不必要的磁盘读写,减轻服务器压力
方法:每次缓存前,检测本地是否有缓存,数据是否发生更新,是否存在数据的添加删除,如果数据没有发生变化,就不再下载缓存,否则更新缓存数据
对数据进行压缩,可以减少磁盘的消耗,缺点:压缩消耗一部分时间
在保存到磁盘之前使用压缩即可
import zlib
import pickle


def save_compressed(fp, obj):
    """Pickle `obj`, zlib-compress the bytes and write them to binary file `fp`.

    Compressing before writing trades a little CPU time for less disk usage.
    (The original snippet had a typo — `pickle.drumps` — and unbalanced
    parentheses on both lines.)
    """
    fp.write(zlib.compress(pickle.dumps(obj)))


def load_compressed(fp):
    """Read zlib-compressed pickle data from binary file `fp` and return the object."""
    return pickle.loads(zlib.decompress(fp.read()))
上面介绍了常见本地磁盘文件型的持久化,我们学习完一定会有疑惑,如果我的数据量巨大巨复杂怎么办,如果还是使用本地磁盘文件型的持久化那得多蛋疼啊,是的,所以我们现在来讨论关于 Python 爬虫的另一类缓存持久化方式 —— 数据库持久化。
'''
Python 3 sqlite3 database persistence demo.
'''
import sqlite3


class Sqlite3Persistence(object):
    """Minimal sqlite3-backed persistence for rows of {'name': ..., 'content': ...}."""

    def __init__(self):
        # sqlite3.Connection; set by connect(), None until then.
        self.db = None

    def connect(self):
        """Open (or create) the database file and ensure DemoTable exists."""
        try:
            self.db = sqlite3.connect("Sqlite3Persistence.db")
            sql_create_table = """CREATE TABLE IF NOT EXISTS `DemoTable` (
                `id` INTEGER PRIMARY KEY AUTOINCREMENT,
                `name` CHAR(512) NOT NULL,
                `content` TEXT NOT NULL)"""
            self.db.execute(sql_create_table)
        except Exception as e:
            print("sqlite3 connect failed." + str(e))

    def close(self):
        """Close the connection if one is open; errors are logged, not raised."""
        try:
            if self.db is not None:
                self.db.close()
        except BaseException as e:
            print("sqlite3 close failed." + str(e))

    def insert_table_dict(self, dict_data=None):
        """Insert one row from a column->value dict.

        Returns True on success, False on bad input or database error.
        (The original built the VALUES clause by string concatenation — broken
        for values containing quotes and an SQL-injection risk — and returned
        True even after a rollback.)
        """
        if dict_data is None:
            return False
        try:
            # Values go through `?` placeholders. Column names cannot be
            # parameterized, so keys must still be trusted identifiers.
            cols = ', '.join('`%s`' % key for key in dict_data)
            placeholders = ', '.join('?' for _ in dict_data)
            sql_insert = "INSERT INTO `DemoTable`(%s) VALUES (%s)" % (cols, placeholders)
            self.db.execute(sql_insert, tuple(dict_data.values()))
            self.db.commit()
        except BaseException as e:
            self.db.rollback()
            print("sqlite3 insert error." + str(e))
            return False
        return True

    def get_dict_by_name(self, name=None):
        """Return all rows as dicts; when `name` is given, only exact matches."""
        if name is None:
            cursor = self.db.execute("SELECT * FROM `DemoTable`")
        else:
            cursor = self.db.execute("SELECT * FROM `DemoTable` WHERE name = ?", (name,))
        return [{'id': row[0], 'name': row[1], 'content': row[2]} for row in cursor]


if __name__ == '__main__':
    t_sqlite3 = Sqlite3Persistence()
    t_sqlite3.connect()
    t_sqlite3.insert_table_dict({'name': 'Test1', 'content': 'XXXXXXXXXXXXX'})
    t_sqlite3.insert_table_dict({'name': 'Test2', 'content': 'vvvvvvvvvvvv'})
    t_sqlite3.insert_table_dict({'name': 'Test3', 'content': 'qqqqqqqqqqqq'})
    t_sqlite3.insert_table_dict({'name': 'Test4', 'content': 'wwwwwwwwwwwww'})
    print('Sqlite3Persistence get Test2: ' + str(t_sqlite3.get_dict_by_name('Test2')))
    print('Sqlite3Persistence get All: ' + str(t_sqlite3.get_dict_by_name()))
'''
Python 3 MySQL database persistence demo.
'''
import pymysql


class MySQLPersistence(object):
    """Minimal pymysql-backed persistence for rows of {'name': ..., 'content': ...}."""

    def __init__(self):
        # Connection and cursor; both set by connect(), None until then.
        self.db = None
        self.cursor = None

    def connect(self):
        """Connect to the local MySQL server and ensure StudentTable exists.

        NOTE(review): host/user/password/database are hard-coded demo values.
        """
        try:
            # Keyword arguments: positional connect() args are deprecated
            # (and removed in newer pymysql releases).
            self.db = pymysql.connect(host="localhost",
                                      user="yanbober",
                                      password="TQJJtaJWNbGAMU44",
                                      database="database_yan_php")
            self.db.set_charset('utf8')
            self.cursor = self.db.cursor()
            sql_create_table = """CREATE TABLE IF NOT EXISTS `StudentTable` (
                `id` int(11) NOT NULL AUTO_INCREMENT,
                `name` varchar(512) COLLATE utf8_bin NOT NULL,
                `content` TEXT COLLATE utf8_bin NOT NULL,
                PRIMARY KEY (`id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8
                COLLATE=utf8_bin AUTO_INCREMENT=1"""
            self.cursor.execute(sql_create_table)
        except Exception as e:
            print("mysql connect failed." + str(e))

    def close(self):
        """Close cursor then connection; errors are logged, not raised.

        (The original closed the connection first, making the cursor close
        operate on a dead connection.)
        """
        try:
            if self.cursor is not None:
                self.cursor.close()
            if self.db is not None:
                self.db.close()
        except BaseException as e:
            print("mysql close failed." + str(e))

    def insert_table_dict(self, dict_data=None):
        """Insert one row from a column->value dict.

        Returns True on success, False on bad input or database error.
        (The original built the VALUES clause by string concatenation — an
        SQL-injection risk — and returned True even after a rollback.)
        """
        if self.db is None or self.cursor is None:
            print('Please ensure you have connected to mysql server!')
            return False
        if dict_data is None:
            return False
        try:
            # Values go through %s placeholders; column names cannot be
            # parameterized, so keys must still be trusted identifiers.
            cols = ', '.join('`%s`' % key for key in dict_data)
            placeholders = ', '.join('%s' for _ in dict_data)
            sql_insert = "INSERT INTO `StudentTable`(%s) VALUES (%s)" % (cols, placeholders)
            self.cursor.execute(sql_insert, tuple(dict_data.values()))
            self.db.commit()
        except BaseException as e:
            self.db.rollback()
            print("mysql insert error." + str(e))
            return False
        return True

    def get_dict_by_name(self, name=None):
        """Return all rows as dicts; when `name` is given, only exact matches.

        Returns None when not connected.
        """
        if self.db is None or self.cursor is None:
            print('Please ensure you have connected to mysql server!')
            return None
        if name is None:
            self.cursor.execute("SELECT * FROM `StudentTable`")
        else:
            self.cursor.execute("SELECT * FROM `StudentTable` WHERE name=%s", (name,))
        return [{'id': item[0], 'name': item[1], 'content': item[2]}
                for item in self.cursor.fetchall()]


if __name__ == '__main__':
    t_mysql = MySQLPersistence()
    t_mysql.connect()
    t_mysql.insert_table_dict({'name': 'Test1', 'content': 'XXXXXXXXXXXXX'})
    t_mysql.insert_table_dict({'name': 'Test2', 'content': 'vvvvvvvvvvvv'})
    t_mysql.insert_table_dict({'name': 'Test3', 'content': 'qqqqqqqqqqqq'})
    t_mysql.insert_table_dict({'name': 'Test4', 'content': 'wwwwwwwwwwwww'})
    print('MySQLPersistence get Test2: ' + str(t_mysql.get_dict_by_name('Test2')))
    print('MySQLPersistence get All: ' + str(t_mysql.get_dict_by_name()))
    t_mysql.close()
上面我们主要介绍了 python3.X 中关系型数据库 mysql、sqlite 的使用,下面我们继续介绍 Python3.X 爬虫中常用的非关系型数据库,先要介绍的是 MongoDB,它是一个基于分布式文件存储的数据库,是为 WEB 应用提供可扩展的高性能数据存储而诞生的,是一个介于关系数据库和非关系数据库之间的东西,也是非关系数据库中功能最丰富、最像关系数据库的数据库。
import pymongo

'''
Python 3 MongoDB database persistence demo.
'''


class MongoDBPersistence(object):
    """Minimal pymongo-backed persistence into the 'DemoTable' collection."""

    def __init__(self):
        # MongoClient and Database handle; both set by connect(), None until then.
        self.conn = None
        self.database = None

    def connect(self, database):
        """Connect to the local MongoDB server and select `database`."""
        try:
            self.conn = pymongo.MongoClient('mongodb://localhost:27017/')
            self.database = self.conn[database]
        except Exception as e:
            print("MongoDB connect failed." + str(e))

    def close(self):
        """Close the client connection; errors are logged, not raised."""
        try:
            if self.conn is not None:
                self.conn.close()
        except BaseException as e:
            print("MongoDB close failed." + str(e))

    def insert_table_dict(self, dict_data=None):
        """Insert one document into DemoTable.

        Returns True on success (matching the original's behavior of returning
        True even when the write itself failed but was caught), False on bad
        input or missing connection.
        """
        if self.conn is None or self.database is None:
            print('Please ensure you have connected to MongoDB server!')
            return False
        if dict_data is None:
            return False
        try:
            collection = self.database['DemoTable']
            # insert_one replaces Collection.save(), which was deprecated in
            # PyMongo 3 and removed in PyMongo 4.
            collection.insert_one(dict_data)
        except BaseException as e:
            print("MongoDB insert error." + str(e))
        return True

    def get_dict_by_name(self, name=None):
        """Return all documents as a list; when `name` is given, only exact matches.

        Returns None when not connected.
        """
        if self.conn is None or self.database is None:
            print('Please ensure you have connected to MongoDB server!')
            return None
        collection = self.database['DemoTable']
        if name is None:
            documents = collection.find()
        else:
            documents = collection.find({"name": name})
        return list(documents)


if __name__ == '__main__':
    t_mongo = MongoDBPersistence()
    t_mongo.connect("DemoDatabase")
    t_mongo.insert_table_dict({'name': 'Test1', 'content': 'XXXXXXXXXXXXX'})
    t_mongo.insert_table_dict({'name': 'Test2', 'content': 'vvvvvvvvvvvv'})
    t_mongo.insert_table_dict({'name': 'Test3', 'content': 'qqqqqqqqqqqq'})
    t_mongo.insert_table_dict({'name': 'Test4', 'content': 'wwwwwwwwwwwww'})
    print('MongoDBPersistence get Test2: ' + str(t_mongo.get_dict_by_name('Test2')))
    print('MongoDBPersistence get All: ' + str(t_mongo.get_dict_by_name()))
    t_mongo.close()
Tips:缓存持久化前我们可以对缓存比较大的文本数据先进行压缩等处理再存储,这样可以节约存储。
通过上面常见的 Python3.X 各种持久化方式介绍我们至少应该知道在爬虫需要缓存持久化时我们可以有很多种选择,至于如上所有持久化如何选型其实是依赖于我们自己爬虫需求来决定的,不同的需求可能需要用不同的持久化类型,不过还是有一些参考策略来指导我们进行爬虫持久化选型的,即我们需要认清上面那些持久化各自的优劣点。
对于本地文件型持久化其实优劣点是很明显的,譬如上面介绍的有些支持序列化存储,有些支持同一文件下多 key-value 对存储,但是数据规模一旦庞大,本地文件存储不仅效率低下,还容易出现数据故障,备份十分麻烦,总之只适用于轻量级本地单一数据格式存储,也就是比较适合我们自己编写的一些小爬虫程序。
对于 Sqlite 数据库存储来说基本上只能认为是本地文件型存储的一个关系型升级,有效的改善了本地磁盘文件存储关系型数据的诟病,但是因为其为单机型迷你数据库,在数据存储量级和数据故障方面也是有瓶颈限制的,至于在本地文件型存储和 Sqlite 的选型时我觉得重点要衡量爬虫有用数据的关系,日后数据间关联紧密,需要互相依赖查找的情况使用 Sqlite 似乎更胜一筹。
对于 MySQL 等关系型数据库存储和 MongoDB 等非关系型数据库存储的优劣比较其实在网上已经有很多文章谈论多年了,不过在爬虫时到底如何选择其实还是取决于我们自己的需求定位,对于关系型数据库存储其具备高结构化数据、结构化查询语言、数据和关系都存储在单独的表中,而对于非关系型数据库存储其具备高可用、高性能、高伸缩性、没有声明性查询语言、使用键值对、列、文档、图形等存储、存储数据不可预知及无结构化可言。我们很多时候的爬虫需求都是爬取某一垂直需求下的海量数据来进行建模数据分析的,对于这种情况其实更加适合使用 MongoDB 来进行爬虫数据存储;而又有些时候我们爬虫数据可能具备高度的结构化封装和关联,我们想将爬取数据用来提供给其他平台进行 API 接口访问,在这种情况下似乎使用 MySQL 是一个不错的选择。
总之,Python3.X 爬虫缓存与持久化选型是需要依据我们需求来决定的,甚至有些情况下可能会出现多种持久化组合使用的情况,我们需要做到的是掌握和知道爬虫持久化可以有哪些选择,只有这样才能不变应万变。
标签:finally 如何 set 就是 arch save led god commit
原文地址:https://www.cnblogs.com/navysummer/p/9939640.html