标签:headers 检测 .com data- 代理 utf-8 processor code 生成
web开发中,同一个url往往可以对应若干套不同的数据(或者界面,如手机、电脑),后台可以根据发起请求的前端的用户代理的不同,而决定应该给前端做出什么样的响应
如果检测到没有用户代理可以拒绝访问
解决方案:伪装请求头
创建 Request 对象req = request.Request(url=url, headers={用户代理})
# Build a Request object that spoofs a real browser so the server does not
# reject the script for having no (or a bot-like) user agent.
# Fixes vs. original: header key was 'UserAgent' (correct HTTP name is
# 'User-Agent'), quotes were mojibake curly quotes, and the UA string was
# missing the spaces between its product tokens.
req = request.Request(
    url=url,
    headers={
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/71.0.3578.80 Safari/537.36'),
        # Session cookie copied from a logged-in browser session.
        'cookie': ('_T_WM=e75d066bb30fae02106ed2a058e3ba08;'
                   'SUB=_2A25xuVTDeRhGeBN7FIR9izJyTSIHXVTQmHbrDV6PUJbktANLVjZkW1NRFYvPzVjmAtKAY7Kppc7xOninRZqgesm;'
                   'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W59rVpGeZ7yg7I7HR0hyYPg5JpX5KzhUgL.Foq0S057Sozfeon2dJLoI05LxKML1heLBBLxKqL1heL1hLxKML12L1hBLxKqLBoeLBKzLxKqLBoeLBKz41K.t;'
                   'SUHB=04WeHU67Q84JrJ'),
    },
)
# A Request object lets us attach request headers, so the request can be
# disguised as coming from a browser or other real client.
用加入了请求头的请求对象发起请求
# Send the request that carries the spoofed headers.
res = request.urlopen(req)
print(res.status)  # print the HTTP status code (e.g. 200)
写入本地
# Save the raw response body to a local file (binary mode: res.read() is bytes).
with open("wei.html", "wb") as fp:
    fp.write(res.read())
from urllib import request, parse

# Baidu translate "sug" endpoint — expects a POST request.
url = 'https://fanyi.baidu.com/sug'
1. 请求头
# 1. Request headers: spoof a desktop Chrome browser.
headers = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/71.0.3578.80 Safari/537.36'),
}
2. 请求体
# 2. Request body: POST data must be sent as bytes, so url-encode the dict
# and then encode the resulting string as utf-8.
data = {"kw": "a"}
data = parse.urlencode(data).encode("utf-8")

print(data)  # b'kw=a'
3. 用前面的请求头、请求体和url来创建请求对象
# 3. Combine the url, body and headers above into one request object.
req = request.Request(url=url, data=data, headers=headers)
4. 发起请求
# 4. Send the POST request and print the raw (bytes) response body.
res = request.urlopen(req)
print(res.read())
from urllib import request, parse
from http import cookiejar
# Build an opener whose handler automatically stores every cookie it
# receives — this is what makes the login session persist across requests.
cookie = cookiejar.CookieJar()                 # container for received cookies
handler = request.HTTPCookieProcessor(cookie)  # handler that carries the jar
opener = request.build_opener(handler)         # opener that uses the handler
# Spoofed desktop-Chrome headers, reused for every request below.
headers = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/71.0.3578.80 Safari/537.36'),
}
# Login endpoint — this site passes the credentials as URL query parameters.
login_url = 'http://www.jokeji.cn/user/c.asp?'

# u/p are username/password; sn and t are site-specific form fields.
dic = {'u': 'bobo666', 'p': 'a12345678', 'sn': '1', 't': 'big'}
params = parse.urlencode(dic)
login_url += params
# Build the login request with the spoofed headers.
login_req = request.Request(url=login_url, headers=headers)

# Send it through the opener (NOT request.urlopen): once this request
# completes, the cookies from the response are saved in the opener's
# CookieJar via its HTTPCookieProcessor handler.
res = opener.open(login_req)
print(res.read().decode('utf-8'))
# A members-only page: fetching it succeeds only if the login cookie is sent.
page_url = "http://www.jokeji.cn/User/MemberCenter.asp"

page_req = request.Request(url=page_url, headers=headers)

# res = request.urlopen(page_req)  # even though the login above succeeded,
#                                  # urlopen has no cookie jar, so the session
#                                  # cookie is not sent and this still fails
import ssl
# Disable HTTPS certificate verification for this process.
# WARNING: this is global and insecure — acceptable only for a throwaway
# scraping script against hosts with broken certificates.
ssl._create_default_https_context = ssl._create_unverified_context

import os
import re
from time import sleep
from urllib import request, parse


# 1. Data acquisition
def request_by(url, page):
    """Build a Request for one listing page.

    Page 1 lives at <url>.html; page N (N > 1) lives at <url>_N.html.
    Returns a urllib.request.Request carrying a spoofed browser User-Agent.
    """
    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/71.0.3578.80 Safari/537.36')}
    if page == 1:
        page_url = url + ".html"
    else:
        page_url = url + "_" + str(page) + ".html"
    print("正在访问:", page_url)
    req = request.Request(url=page_url, headers=headers)
    return req


def get_html_from(req):
    """Send the request and return the response body decoded as utf-8."""
    res = request.urlopen(req)
    # Sleep after every request to avoid hammering the server.
    sleep(1)
    return res.read().decode("utf-8")


# 2. Data parsing
def anylasis_data(html):
    """Extract the lazy-loaded image URLs (the src2 attribute) from the page.

    (Name kept as in the original script, typo included, since the
    script's main block calls it by this name.)
    """
    pat = re.compile(r'<div class="box picblock.*?<img src2="(.*?)"', re.S)
    imgs = pat.findall(html)
    return imgs


# 3. Data storage
def download_imgs(imgs):
    """Download each image into ./meinv/, named after the URL's last segment."""
    # Fix: create the target directory up front instead of crashing on the
    # first urlretrieve when ./meinv/ does not exist.
    os.makedirs("./meinv", exist_ok=True)
    for img in imgs:
        # e.g. http://pic1.sc.chinaz.com/Files/pic/pic9/201904/zzpic17564_s.jpg
        img_name = img.split("/")[-1]
        print("正在下载图片:", img)
        request.urlretrieve(url=img, filename="./meinv/" + img_name)
        sleep(1)


if __name__ == '__main__':
    page_url = "http://sc.chinaz.com/tupian/meinvxiezhen"

    for i in range(1, 2):
        req = request_by(url=page_url, page=i)
        res = get_html_from(req)
        imgs = anylasis_data(res)
        download_imgs(imgs)
爬虫基础框架 之urllib(一) --- urllib post请求
标签:headers 检测 .com data- 代理 utf-8 processor code 生成
原文地址:https://www.cnblogs.com/TMMM/p/10800047.html