As an example, let's crawl room information from the Douyu live-streaming platform.
API URL: http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset=0
Fields to crawl: room ID, room name, image URL, local path of the stored image, nickname, number of online viewers, city
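Before writing the spider, it is worth probing the endpoint once to confirm the shape of the JSON it returns. Below is a minimal sketch, assuming the API is still reachable and still wraps the room list in a top-level "data" key (the mobile User-Agent mirrors the one set later in settings.py):

import json
import requests

url = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset=0"
headers = {"User-Agent": "DYZB/2.290 (iPhone; iOS 9.3.4; Scale/2.00)"}

resp = requests.get(url, headers=headers)
rooms = json.loads(resp.text)["data"]  # list of room dicts

# print the fields we plan to extract, using the first room as a sample
for key in ("room_id", "room_name", "vertical_src",
            "nickname", "online", "anchor_city"):
    print(key, "=>", rooms[0][key])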
1.items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

class DouyuspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # room ID
    room_id = scrapy.Field()
    # room name
    room_name = scrapy.Field()
    # image URL
    vertical_src = scrapy.Field()
    # local path of the stored image
    image_path = scrapy.Field()
    # nickname
    nickname = scrapy.Field()
    # number of online viewers
    online = scrapy.Field()
    # city
    anchor_city = scrapy.Field()
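A scrapy.Item behaves like a dict restricted to its declared Fields, which is why the spider below can fill it with item['room_id'] = ... and the pipeline can call dict(item). A quick shell check (the values are hypothetical):

from douyuSpider.items import DouyuspiderItem

item = DouyuspiderItem()
item["room_id"] = "123456"        # hypothetical value
item["nickname"] = "some_anchor"  # hypothetical value
print(dict(item))  # {'room_id': '123456', 'nickname': 'some_anchor'}
# assigning an undeclared key fails fast:
# item["foo"] = 1  # KeyError: 'DouyuspiderItem does not support field: foo'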
2.spiders/douyu.py
# -*- coding: utf-8 -*-
import json

import scrapy

from douyuSpider.items import DouyuspiderItem

class DouyuSpider(scrapy.Spider):
    name = 'douyu'
    allowed_domains = ['capi.douyucdn.cn']
    url = 'http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset='
    offset = 0
    start_urls = [url + str(offset)]

    def parse(self, response):
        # flag marking whether to crawl the next page
        next_flag = False
        data = json.loads(response.text)["data"]
        for each in data:
            item = DouyuspiderItem()
            # room ID
            item['room_id'] = each["room_id"]
            # room name
            item['room_name'] = each["room_name"]
            # image URL
            item['vertical_src'] = each["vertical_src"]
            # nickname
            item['nickname'] = each["nickname"]
            # number of online viewers
            item['online'] = each["online"]
            # city
            item['anchor_city'] = each["anchor_city"]
            next_flag = True
            yield item

        # decide whether to crawl the next page
        if next_flag:
            self.offset += 20
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
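Note how the pagination works: next_flag is set as soon as the current page yields at least one room, so the spider keeps requesting offset += 20 until the API returns an empty "data" list, at which point no further Request is yielded and the crawl ends. From the project root the spider is started as usual:

scrapy crawl douyu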
3.pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json
import os

import scrapy
from scrapy.pipelines.images import ImagesPipeline
from scrapy.utils.project import get_project_settings

# pipeline that stores item data in a JSON file
class DouyuspiderPipeline(object):
    def __init__(self):
        self.file = open("斗鱼.json", "w", encoding="utf-8")
        self.first_flag = True

    def process_item(self, item, spider):
        if self.first_flag:
            self.first_flag = False
            content = "[\n" + json.dumps(dict(item), ensure_ascii=False)
        else:
            content = ",\n" + json.dumps(dict(item), ensure_ascii=False)
        self.file.write(content)
        return item

    def close_spider(self, spider):
        self.file.write("\n]")
        self.file.close()

# pipeline that downloads the room images
# (renamed from ImagesPipeline so it no longer shadows the imported base class)
class DouyuImagesPipeline(ImagesPipeline):
    IMAGES_STORE = get_project_settings().get("IMAGES_STORE")

    def get_media_requests(self, item, info):
        image_url = item["vertical_src"]
        yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        # standard idiom: collect the paths of the successfully downloaded
        # images (see the ImagesPipeline source for how `results` is structured)
        image_path = [x["path"] for ok, x in results if ok]
        if image_path:
            # rename the downloaded file after the anchor's nickname
            os.rename(os.path.join(self.IMAGES_STORE, image_path[0]),
                      os.path.join(self.IMAGES_STORE, item["nickname"] + ".jpg"))
            item["image_path"] = os.path.join(self.IMAGES_STORE,
                                              item["nickname"] + ".jpg")
        return item
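DouyuspiderPipeline assembles a valid JSON array by hand: the first item is prefixed with "[", every later one with ",", and close_spider appends the closing "]". If you only need the JSON output and not the image renaming, Scrapy's built-in feed export is a simpler alternative (the output filename is your choice):

scrapy crawl douyu -o douyu.json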
4.settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for douyuSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'douyuSpider'

SPIDER_MODULES = ['douyuSpider.spiders']
NEWSPIDER_MODULE = 'douyuSpider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'douyuSpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'User-Agent': 'DYZB/2.290 (iPhone; iOS 9.3.4; Scale/2.00)'
    # 'Accept-Language': 'en',
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'douyuSpider.middlewares.DouyuspiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'douyuSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'douyuSpider.pipelines.DouyuspiderPipeline': 300,
    'douyuSpider.pipelines.DouyuImagesPipeline': 200,
}

# Where downloaded images are stored; referenced later in pipelines.py
IMAGES_STORE = "Images\\"

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
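One detail worth noting in ITEM_PIPELINES: lower numbers run first, so DouyuImagesPipeline (200) downloads and renames each image before DouyuspiderPipeline (300) serializes the item, which is what guarantees that image_path is already filled in by the time the item reaches the JSON file.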
Original post: http://www.cnblogs.com/mayi0312/p/7259719.html