标签:like 接口 图片 return gif display 后缀 iba opened
* 保证parse方法返回一个可迭代类型的对象(存储解析到的页面内容)
* 使用终端指定完成数据存储到磁盘文件的操作
scrapy crawl 爬虫文件名称 -o 磁盘文件.后缀
def parse(self, response):
    """Parse the response page and return the scraped records.

    Scrapy requires parse() to return an iterable; here each record is a
    dict with 'author' and 'content' keys, extracted with the framework's
    built-in XPath support.
    """
    # response.xpath() returns Selector objects; extract_first() — the
    # equivalent of extract()[0] — unwraps the stored string value.
    div_list = response.xpath('//div[@id="content-left"]/div')
    data_list = []
    for div in div_list:
        author = div.xpath('./div/a[2]/h2/text()').extract_first()
        content = div.xpath('.//div[@class="content"]/span/text()').extract_first()
        data_dict = {
            'author': author,
            'content': content,
        }
        data_list.append(data_dict)
    return data_list
* items: 存储解析到的页面数据
* pipelines: 处理持久化存储的相关操作
* 代码流程:
class QiubaiSpider(scrapy.Spider):
    """Spider for qiushibaike.com text jokes; yields items to the pipelines."""

    name = 'qiubai'
    # allowed_domains = ['www.qiushibaike.com/text']
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        """Extract author/content pairs and yield them as items.

        XPath parsing uses the interface integrated into the framework;
        extract_first() (equivalent to extract()[0]) unwraps the string
        stored in each Selector object.
        """
        div_list = response.xpath('//div[@id="content-left"]/div')
        for div in div_list:
            author = div.xpath('./div/a[2]/h2/text()').extract_first()
            content = div.xpath('.//div[@class="content"]/span/text()').extract_first()
            # Store the parsed values (author and content) on an item
            # object, then hand the item over to the pipeline.
            item = SpiderqiubaiItem()
            item['author'] = author
            item['content'] = content
            yield item
import scrapy


class SpiderqiubaiItem(scrapy.Item):
    """Item holding one scraped record: the joke author and its content."""

    # define the fields for your item here like:
    # name = scrapy.Field()
    author = scrapy.Field()
    content = scrapy.Field()
class SpiderqiubaiPipeline(object):
    """Pipeline that persists received items into a local text file."""

    fp = None  # file handle, opened once for the whole crawl

    def open_spider(self, spider):
        # Called exactly once, when the crawl starts.
        print('开始爬虫')
        self.fp = open('./qiubai_pipe.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Persist one item's page data.

        Receives the item object submitted by the spider; called once for
        every item the spider yields to the pipeline.
        """
        # Pull the stored values out of the item object.
        author = item['author']
        content = item['content']
        # Persist to disk.
        self.fp.write(author + ":" + content + "\n\n\n")
        return item

    def close_spider(self, spider):
        # Called exactly once, when the crawl ends.
        print("爬虫结束")
        self.fp.close()
* 代码流程:
class SpiderqiubaiPipeline(object):
    """Pipeline that persists received items into a MySQL table."""

    conn = None  # pymysql connection, opened once for the whole crawl

    def open_spider(self, spider):
        # Called exactly once, when the crawl starts: connect to MySQL.
        self.conn = pymysql.Connect(host='192.168.1.10', port=3306, user='root',
                                    password='cs1993413', db='qiubai')

    def process_item(self, item, spider):
        """Insert one item into the qiubai table; called once per item.

        Uses a parameterized query instead of %-formatting scraped text
        into the SQL string, which would be vulnerable to SQL injection.
        """
        sql = 'insert into qiubai values(%s, %s)'
        self.cursor = self.conn.cursor()
        try:
            self.cursor.execute(sql, (item['author'], item['content']))
            self.conn.commit()
        except Exception:
            # Undo the failed transaction and keep the crawl running.
            self.conn.rollback()
        return item

    def close_spider(self, spider):
        # Called exactly once, when the crawl ends.
        self.conn.close()
class SpiderqiubaiPipeline(object):
    """Pipeline that pushes received items onto a Redis list."""

    conn = None  # redis connection, opened once for the whole crawl

    def open_spider(self, spider):
        # Called exactly once, when the crawl starts: connect to Redis.
        self.conn = redis.Redis(host='192.168.1.10', port=6379)

    def process_item(self, item, spider):
        """Push one item onto the 'data' list; called once per item.

        redis-py cannot serialize a plain dict (lpush raises DataError),
        so the record is JSON-encoded before being pushed.
        """
        import json  # local import keeps this pipeline snippet self-contained
        data_dict = {
            'author': item['author'],
            'content': item['content'],
        }
        self.conn.lpush('data', json.dumps(data_dict, ensure_ascii=False))
        return item
将数据同时存储在本地文件、MySQL数据库和Redis中
# Persist the scraped values to a local disk file.
class QiubaiByFiels(object):
    """Pipeline that writes each item to ./qiubai_pipe.txt."""

    fp = None  # file handle, opened once for the whole crawl

    def open_spider(self, spider):
        # Called exactly once, when the crawl starts.
        print('开始爬虫')
        self.fp = open('./qiubai_pipe.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Write one item as "author:content" followed by a blank gap."""
        author = item['author']
        content = item['content']
        self.fp.write(author + ":" + content + "\n\n\n")
        return item

    def close_spider(self, spider):
        # Called exactly once, when the crawl ends.
        print("爬虫结束")
        self.fp.close()
将数据值存储到mysql数据库中
class QiubaiByMysql(object):
    """Pipeline that inserts each item into the MySQL qiubai table."""

    conn = None  # pymysql connection, opened once for the whole crawl

    def open_spider(self, spider):
        # Called exactly once, when the crawl starts: connect to MySQL.
        self.conn = pymysql.Connect(host='192.168.1.10', port=3306, user='root',
                                    password='cs1993413', db='qiubai')

    def process_item(self, item, spider):
        """Insert one item; called once per item the spider yields.

        Uses a parameterized query instead of %-formatting scraped text
        into the SQL string, which would be vulnerable to SQL injection.
        """
        sql = 'insert into qiubai values(%s, %s)'
        self.cursor = self.conn.cursor()
        try:
            self.cursor.execute(sql, (item['author'], item['content']))
            self.conn.commit()
        except Exception:
            # Undo the failed transaction and keep the crawl running.
            self.conn.rollback()
        return item

    def close_spider(self, spider):
        # Called exactly once, when the crawl ends.
        self.conn.close()
settings.py
# Register the pipelines; a lower number means higher priority (runs first).
ITEM_PIPELINES = {
    'spiderqiubai.pipelines.SpiderqiubaiPipeline': 300,
    'spiderqiubai.pipelines.QiubaiByMysql': 200,
    'spiderqiubai.pipelines.QiubaiByFiels': 100,
}
标签:like 接口 图片 return gif display 后缀 iba opened
原文地址:https://www.cnblogs.com/harryblog/p/11356354.html