ImagesPipeline: a pipeline class dedicated to downloading and persisting binary data. The recommendation is to do the data parsing in the spider file and not to store the data there directly; leave storage to the pipeline.
Image lazy loading: implemented through a pseudo attribute on the img tag, so the data must be extracted from that pseudo attribute.
# -*- coding: utf-8 -*-
import scrapy
from imgPro.items import ImgproItem

class ImgSpider(scrapy.Spider):
    name = 'img'
    start_urls = ['http://sc.chinaz.com/tupian/meinvtupian.html']

    def parse(self, response):
        div_list = response.xpath('//*[@id="container"]/div')
        for div in div_list:
            # Pseudo attribute (an anti-crawling mechanism and also a page-optimization trick):
            # the real URL lives in src2, not src; the browser swaps it into src as you scroll.
            img_src = div.xpath('./div/a/img/@src2').extract_first()
            item = ImgproItem()
            item['img_src'] = img_src
            # Hand the image URL over to the ImagesPipeline-based pipeline class.
            yield item
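----------------------
The spider relies on an ImgproItem with a single img_src field. items.py is not shown in the original post; a minimal sketch of what it presumably looks like:
# items.py (assumed; only the img_src field is used by the spider above)
import scrapy

class ImgproItem(scrapy.Item):
    img_src = scrapy.Field()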
----------------------
# -*- coding: utf-8 -*-
# Import the ImagesPipeline pipeline class
from scrapy.pipelines.images import ImagesPipeline
# scrapy is needed to send the download requests
import scrapy

# Override the parent-class methods
class ImgproPipeline(ImagesPipeline):
    # Sends the request for the media resource (downloads the data);
    # the item parameter is the item object submitted by the spider class.
    def get_media_requests(self, item, info):
        # No callback needs to be passed in.
        yield scrapy.Request(item['img_src'])

    # Specifies where the data is stored: only the file name can be set here;
    # the storage directory itself is configured in settings.py.
    def file_path(self, request, response=None, info=None):
        return request.url.split('/')[-1]

    # Passes the item on to the next pipeline class to be executed.
    def item_completed(self, results, item, info):
        return item
-------------------
# settings.py
...
# Directory where the downloaded images will be stored
IMAGES_STORE = './imgLibs'
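Note that the custom pipeline class itself also has to be enabled in settings.py. Assuming the default project layout (module imgPro.pipelines), that would be something like:
ITEM_PIPELINES = {
    'imgPro.pipelines.ImgproPipeline': 300,
}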
-------------------
CrawlSpider: a technique for crawling the data of an entire site with scrapy.
CrawlSpider is simply a subclass of Spider.
Workflow:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor  # link extractor
from scrapy.spiders import CrawlSpider, Rule  # rule parser
from sunCrawlPro.items import SuncrawlproItem, Detail_item  # imported manually

class SunSpider(CrawlSpider):
    name = 'sun'
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=']
    # Instantiate a link extractor: it extracts the links that match the given rule (allow=r'regular expression').
    link = LinkExtractor(allow=r'type=4&page=\d+')  # extracts the pagination links
    # Extracts the links to the news detail pages
    link_detail = LinkExtractor(allow=r"question/\d+/\d+\.shtml")

    rules = (
        # link is passed as the first argument of Rule: the extracted links are requested
        # and the responses are parsed by the given callback.
        Rule(link, callback='parse_item', follow=False),  # follow=False: only take the matching links on the start page.
        # follow=True would keep applying the link extractor to the pages behind the extracted
        # links, so that every matching link on the whole site is eventually collected.
        Rule(link_detail, callback='parse_detail'),  # follow defaults to False
    )

    def parse_item(self, response):
        # The tbody tag must not appear in the XPath expression.
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            title = tr.xpath('./td[2]/a[2]/text()').extract_first()
            num = tr.xpath('./td[1]/text()').extract_first()
            item = SuncrawlproItem()
            item['title'] = title
            item['num'] = num
            yield item

    def parse_detail(self, response):
        content = response.xpath('/html/body/div[9]/table[2]//tr[1]/td/text()').extract_first()
        num = response.xpath('/html/body/div[9]/table[1]//tr/td[2]/span[2]/text()').extract_first()
        num = num.split(':')[-1]
        item = Detail_item()
        item['content'] = content
        item['num'] = num
        yield item
-----------------
# -*- coding: utf-8 -*-
# Define your item pipelines here
class SuncrawlproPipeline(object):
    def process_item(self, item, spider):
        if item.__class__.__name__ == 'Detail_item':
            content = item['content']
            num = item['num']
            print(item)
        else:
            title = item['title']
            num = item['num']
            print(item)
        return item
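-----------------
The two item classes used above are not shown in the original post; reconstructed from the fields the spider populates, items.py presumably looks roughly like this:
# items.py (assumed, reconstructed from the fields used above)
import scrapy

class SuncrawlproItem(scrapy.Item):
    title = scrapy.Field()
    num = scrapy.Field()

class Detail_item(scrapy.Item):
    content = scrapy.Field()
    num = scrapy.Field()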
Concept: set up a distributed cluster and run the same program on every machine in it, so that the machines jointly crawl the data of one website.
The native scrapy framework cannot do this on its own: neither the scheduler nor the pipelines can be shared between machines.
Implementation: scrapy + scrapy-redis.
The scrapy-redis component:
Step 1: pip install scrapy-redis
Step 2: create the project.
Step 3: cd into the project directory.
Step 4: create the spider file (two options: a spider based on Spider or one based on CrawlSpider; the example below uses the CrawlSpider flavour).
Step 5: modify the spider class: inherit from RedisCrawlSpider (or RedisSpider), comment out start_urls and allowed_domains, and add a redis_key attribute naming the shared scheduler queue.
Step 6: configure settings.py:
UA spoofing and the robots.txt setting.
Specify the shared pipeline: ITEM_PIPELINES = {'scrapy_redis.pipelines.RedisPipeline': 400}
Specify the shared scheduler:
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
Specify the redis database:
REDIS_HOST = 'IP address of the redis server'
REDIS_PORT = 6379
Step 7: edit redis's configuration file (redis.windows.conf): typically comment out the bind 127.0.0.1 line and set protected-mode no, so that the other machines in the cluster can connect.
Step 8: start the redis server and client:
redis-server.exe redis.windows.conf
(the server must be started with this configuration file)
redis-cli
Step 9: run the spider: scrapy runspider xxx.py
Step 10: push a start URL into the scheduler's shared queue:
lpush fbsQueue http://wz.sun0769.com/index.php/question/questionType?type=4&page=
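-----------------
The same seeding can also be done from Python with redis-py, and the items that scrapy_redis.pipelines.RedisPipeline writes back to Redis can be checked afterwards. The snippet below is a hedged sketch: it assumes redis-py is installed and relies on scrapy-redis' default items key, '<spider name>:items' (i.e. 'fbs:items' for the spider below).
# seed_and_check.py (illustrative sketch, not part of the original project)
from redis import Redis

conn = Redis(host='127.0.0.1', port=6379)
# Push the start URL into the shared scheduler queue named by redis_key.
conn.lpush('fbsQueue', 'http://wz.sun0769.com/index.php/question/questionType?type=4&page=')
# After the crawl has been running for a while, RedisPipeline stores the
# serialized items in a list (default key: '<spider name>:items').
print(conn.llen('fbs:items'))
-----------------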
# -*- coding: utf-8 -*-
# spiders/fbs.py
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider  # imported manually
from fbsPro.items import FbsproItem

class FbsSpider(RedisCrawlSpider):
    name = 'fbs'
    # allowed_domains = ['www.xxx.com']
    # start_urls = ['http://www.xxx.com/']
    redis_key = 'fbsQueue'  # name of the shared scheduler queue

    rules = (
        Rule(LinkExtractor(allow=r'type=4&page=\d+'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        tr_list = response.xpath('//*[@id="morelist"]/div/table[2]//tr/td/table//tr')
        for tr in tr_list:
            title = tr.xpath('./td[2]/a[2]/text()').extract_first()
            status = tr.xpath('./td[3]/span/text()').extract_first()
            item = FbsproItem()
            item['title'] = title
            item['status'] = status
            yield item
# settings.py
BOT_NAME = 'fbsPro'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
SPIDER_MODULES = ['fbsPro.spiders']
NEWSPIDER_MODULE = 'fbsPro.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 3
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 400
}
# Dedup-filter class: stores the request fingerprints in a Redis set,
# which makes request deduplication persistent.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Use the scheduler that ships with the scrapy-redis component.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Whether the scheduler persists its state, i.e. whether the request queue and the
# fingerprint set in Redis are kept when the crawl finishes.
# True: persist (do not clear the data), which also enables incremental crawling.
# False: clear the data.
SCHEDULER_PERSIST = True
REDIS_HOST = '192.168.18.36'
REDIS_PORT = 6379
# items.py
import scrapy

class FbsproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    status = scrapy.Field()
Incremental crawling. Concept: used to monitor a website for newly published data.
Core mechanism: deduplication, which can be implemented with a Redis set.
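The heart of this mechanism is that Redis' SADD returns 1 when the value is new to the set and 0 when it is already there. A minimal illustration (assuming a local Redis and redis-py; the key name matches the spider below, the URL is made up):
# dedup_demo.py (illustrative sketch, not part of the original project)
from redis import Redis

conn = Redis(host='127.0.0.1', port=6379)
print(conn.sadd('movie_detail_urls', 'https://example.com/detail/1'))  # 1: never seen, crawl it
print(conn.sadd('movie_detail_urls', 'https://example.com/detail/1'))  # 0: already recorded, skip it
-----------------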
# settings.py
# -*- coding: utf-8 -*-
BOT_NAME = 'zjsPro'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
SPIDER_MODULES = ['zjsPro.spiders']
NEWSPIDER_MODULE = 'zjsPro.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'zjsPro.pipelines.ZjsproPipeline': 300,
}
# spiders/zjs.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from zjsPro.items import ZjsproItem
from redis import Redis

class ZjsSpider(CrawlSpider):
    conn = Redis(host='127.0.0.1', port=6379)
    name = 'zjs'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/class/%E7%88%B1%E6%83%85/id/1.html']

    rules = (
        Rule(LinkExtractor(allow=r'/page/\d+\.html'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        li_list = response.xpath('/html/body/div[1]/div/div/div/div[2]/ul/li')
        for li in li_list:
            name = li.xpath('./div/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div/a/@href').extract_first()
            item = ZjsproItem()
            item['name'] = name
            # Record the detail-page URL of every movie that has been crawled.
            # sadd returns 0 if the URL was already in the set and 1 if it was newly inserted.
            ex = self.conn.sadd('movie_detail_urls', detail_url)
            if ex:
                print('Caught newly published data.')
                yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})
            else:
                print('No new data yet.')

    def parse_detail(self, response):
        desc = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[5]/span[2]/text()').extract_first()
        item = response.meta['item']
        item['desc'] = desc
        yield item
# items.py
# -*- coding: utf-8 -*-
import scrapy

class ZjsproItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field()
    desc = scrapy.Field()
# pipelines.py
# -*- coding: utf-8 -*-
class ZjsproPipeline(object):
    def process_item(self, item, spider):
        conn = spider.conn
        # redis-py cannot push an Item object directly, so push a plain string representation.
        conn.lpush('moiveData', str(dict(item)))
        return item
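For verification, the records pushed by the pipeline can be read back from the 'moiveData' list; a small sketch (assumes redis-py, not part of the original project):
# check_movies.py (illustrative sketch)
from redis import Redis

conn = Redis(host='127.0.0.1', port=6379)
# lrange returns the raw byte strings that the pipeline pushed.
for raw in conn.lrange('moiveData', 0, -1):
    print(raw.decode('utf-8'))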
Common anti-crawling mechanisms:
Original article: https://www.cnblogs.com/wby-110/p/13508793.html