import requests  # HTTP requests
from multiprocessing import Pool  # process pool for crawling several pages in parallel
from requests.exceptions import RequestException  # for exception handling
import json
import re  # regular expressions

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
}
def get_one_page(url):  # request the HTML of a single page
    try:
        response = requests.get(url, headers=headers)  # fetch the url with requests.get
        if response.status_code == 200:  # 200 means the request succeeded
            return response.text  # return the page content
        return None  # any other status code: return None
    except RequestException:
        return None
def parse_one_page(html):  # parse the HTML of a single page
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)  # regex for one movie entry
    items = re.findall(pattern, html)  # apply the compiled pattern
    for item in items:  # iterate over the matches
        yield {
            '排名': item[0],                  # rank
            '图片地址': item[1],              # poster image URL
            '标题': item[2],                  # title
            '主演': item[3].strip()[3:],      # starring (drop the "主演:" prefix)
            '上映时间': item[4].strip()[5:],  # release date (drop the "上映时间:" prefix)
            '评分': item[5] + item[6]         # score = integer part + fraction part
        }
def write_to_file(content):  # append one crawled item to result.txt
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
if __name__ == "__main__":
    pool = Pool()  # build a process pool
    pool.map(main, [i * 10 for i in range(10)])  # crawl the 10 board pages (offset 0, 10, ..., 90)
    pool.close()
    pool.join()
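If you want to sanity-check the regular expression without actually requesting maoyan.com, you can feed parse_one_page a small hand-written HTML fragment. The snippet below is only a sketch: sample_html and its values are made up for illustration and are simpler than the real Maoyan markup, but they contain exactly the tags the pattern looks for.

# Minimal, made-up fragment imitating one <dd> entry of the board page (not real Maoyan HTML).
sample_html = '''
<dd>
    <i class="board-index board-index-1">1</i>
    <a href="/films/1"><img src="http://example.com/poster.jpg" alt=""></a>
    <p class="name"><a href="/films/1">某电影</a></p>
    <p class="star">主演:演员A,演员B</p>
    <p class="releasetime">上映时间:1994-01-01</p>
    <p class="score"><i class="integer">9.</i><i class="fraction">5</i></p>
</dd>
'''

for item in parse_one_page(sample_html):
    print(item)
# Expected output, roughly:
# {'排名': '1', '图片地址': 'http://example.com/poster.jpg', '标题': '某电影',
#  '主演': '演员A,演员B', '上映时间': '1994-01-01', '评分': '9.5'}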
Original post: https://www.cnblogs.com/liunaiming/p/12234895.html