标签: pyspider python 爬虫
直接copy官网实例会出现599的错误,百度了很久发现是因为证书的问题
添加这一句忽略证书 validate_cert = False
代码如下:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2016-05-21 20:21:32
# Project: tutorial_douban_moive
from pyspider.libs.base_handler import *
import re
class Handler(BaseHandler):
    """pyspider handler that crawls Douban movie tag pages.

    Flow: on_start seeds the tag index page -> index_page follows links that
    match https://movie.douban.com/tag/<word> -> list_page follows each movie
    link in the listing table -> detail_page extracts url/title/rating/director.

    NOTE: validate_cert=False is passed on every crawl because pyspider's
    fetcher fails with HTTP error 599 on Douban's certificate; disabling
    verification works around that (at the cost of TLS validation).
    """

    crawl_config = {
    }

    @every(minutes=24 * 60)
    def on_start(self):
        # Daily entry point: seed the crawl with the tag index page.
        url = "http://movie.douban.com/tag/"
        self.crawl(url, callback=self.index_page, validate_cert=False)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        # Follow only absolute links that look like tag listing pages.
        for each in response.doc('a[href^="http"]').items():
            # Raw string so \w is a regex word class, not an escape.
            if re.match(r"https://movie.douban.com/tag/\w+", each.attr.href, re.U):
                self.crawl(each.attr.href, callback=self.list_page, validate_cert=False)

    def list_page(self, response):
        # Selector obtained via the browser's "Copy selector" on a movie link
        # in the tag listing table; each match is one movie detail URL.
        for each in response.doc('#content > div > div.article > div > table tr > td > div > a').items():
            self.crawl(each.attr.href, callback=self.detail_page, validate_cert=False)

    @config(priority=2)
    def detail_page(self, response):
        # Extract the fields of interest from a single movie detail page.
        # Selectors were copied from the browser dev tools for each element.
        return {
            "url": response.url,
            "title": response.doc('#content > h1 > span').text(),
            "rating": response.doc('#interest_sectl > div.rating_wrap.clearbox > div.rating_self.clearfix > strong').text(),
            "director": [x.text() for x in response.doc('#info > span:nth-child(1) > span.attrs > a').items()],
        }
标签: pyspider python 爬虫
原文地址:http://www.cnblogs.com/tangbinghaochi/p/6137030.html