```python
# Import urllib.request
import urllib.request

# Send a request to the given URL; the call returns a file-like object
# wrapping the server's response
response = urllib.request.urlopen("http://www.baidu.com")

# The file-like object supports the usual file methods; read() returns
# the entire body as bytes
html = response.read()

# Remember to call decode('utf-8') when printing, so you get readable
# text instead of a bytes literal full of \n escapes
print(html.decode('utf-8'))
```
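The object returned by urlopen() behaves like a file, but it also carries the response metadata. A minimal sketch, reusing the same baidu.com request as above:

```python
import urllib.request

response = urllib.request.urlopen("http://www.baidu.com")

print(response.getcode())  # HTTP status code, e.g. 200
print(response.geturl())   # the URL that was actually retrieved
print(response.info())     # the response headers
print(response.read(100))  # read() also accepts a byte count, like a file
```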
In our first example, the argument to urlopen() was simply a URL string.

For anything more involved, such as adding HTTP headers, you must create a Request instance and pass that to urlopen(); the URL to fetch then becomes an argument to the Request constructor instead.
```python
import urllib.request

url = "http://www.itcast.cn"

# Sending a plausible User-Agent is the first step in the fight
# against anti-crawler measures
list_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}

# Build a Request from the url together with the headers; this request
# will carry a Chrome browser User-Agent
request = urllib.request.Request(url, headers=list_headers)

# Send the request to the server
response = urllib.request.urlopen(request)
html = response.read()
print(html.decode('utf-8'))
```
A browser is the one identity the internet accepts without question, so if we want our crawler to look like a real user, the first step is to disguise it as a recognized browser. Different browsers send different User-Agent headers with their requests.
```python
# Add a specific header
import urllib.request

url = "http://www.itcast.cn"

# The User-Agent of IE 9.0
header = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
request = urllib.request.Request(url, headers=header)

# Request.add_header() can also add or modify a specific header
request.add_header("Connection", "keep-alive")

# And Request.get_header() can look a header back up
# request.get_header(header_name="Connection")

response = urllib.request.urlopen(request)

# Check the response status code (200 on success)
print(response.code)
html = response.read()
print(html.decode('utf-8'))
```
Randomly adding/modifying the User-Agent
```python
# Randomly add/modify the User-Agent
import urllib.request
import random

url = "http://www.itcast.cn"

ua_list = [
    "Mozilla/5.0 (Windows NT 6.1; ) Apple.... ",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0)... ",
    "Mozilla/5.0 (Macintosh; U; PPC Mac OS X.... ",
    "Mozilla/5.0 (Macintosh; Intel Mac OS... "
]

user_agent = random.choice(ua_list)

request = urllib.request.Request(url)

# Request.add_header() can also add or modify a specific header
request.add_header("User-Agent", user_agent)

# Header names are stored with only the first letter capitalized and
# the rest lowercase, so the lookup key is "User-agent"
request.get_header("User-agent")

response = urllib.request.urlopen(request)
html = response.read()
print(html.decode('utf-8'))
```
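A note on the get_header("User-agent") lookup above: Request.add_header() normalizes header names with str.capitalize(), so "User-Agent" is stored internally as "User-agent" (first letter uppercase, everything after it lowercase), and get_header() must use that exact form to find it.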
A GET request with Chinese characters in the URL; the final URL must be a str.
```python
from urllib import request, parse

url = "http://www.baidu.com/s"

# Sending a plausible User-Agent is the first step in the fight
# against anti-crawler measures
list_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}

keyword = input("Enter the keyword to search for: ")
params = {'wd': keyword}

# parse.urlencode() takes a dict and percent-encodes it
params = parse.urlencode(params)
print(params)        # wd=%E4%BC%A0%E6%99%BA%E6%92%AD%E5%AE%A2
print(type(params))  # params is a str

fullurl = url + '?' + params
print(fullurl)       # http://www.baidu.com/s?wd=%E4%BC%A0%E6%99%BA%E6%92%AD%E5%AE%A2

req = request.Request(url=fullurl, headers=list_headers)
response = request.urlopen(req)
print(response.read().decode('utf-8'))
```
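If you only need to encode a single value rather than a whole dict of parameters, urllib.parse.quote() percent-encodes one string directly. A small sketch, using the keyword 传智播客 that produced the encoded output shown above:

```python
from urllib import parse

keyword = "传智播客"
print(parse.quote(keyword))              # %E4%BC%A0%E6%99%BA%E6%92%AD%E5%AE%A2
print(parse.urlencode({"wd": keyword}))  # wd=%E4%BC%A0%E6%99%BA%E6%92%AD%E5%AE%A2
```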
First we create a Python file, tiebaSpider.py. The goal: take the address of a Baidu Tieba forum as input, for example:
Page 1 of Baidu Tieba's LOL forum: http://tieba.baidu.com/f?kw=lol&ie=utf-8&pn=0
Page 2: http://tieba.baidu.com/f?kw=lol&ie=utf-8&pn=50
Page 3: http://tieba.baidu.com/f?kw=lol&ie=utf-8&pn=100
See the pattern? The only thing that differs from page to page is the value of pn at the end of the URL, i.e. pn = (page - 1) * 50; everything else is identical, and that is the regularity we can exploit.
```python
import urllib.request
from urllib import parse


def loadPage(url, filename):
    """
    Send a request to url and return the server's response.
    url: the URL to crawl
    filename: the name of the file being processed (for logging)
    """
    print("Downloading " + filename)
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    request = urllib.request.Request(url, headers=headers)
    return urllib.request.urlopen(request).read()


def writePage(html, filename):
    """
    Write the html content to a local file.
    html: the response body returned by the server (bytes)
    """
    print("Saving " + filename)
    # read() returned bytes, so the file must be opened in binary mode
    with open(filename, "wb+") as f:
        f.write(html)
    print("-" * 30)


def tiebaSpider(url, beginPage, endPage):
    """
    Scheduler for the Tieba crawler: builds and processes each page URL.
    url: the fixed front part of the Tieba URL
    beginPage: first page to fetch
    endPage: last page to fetch
    """
    for page in range(beginPage, endPage + 1):
        pn = (page - 1) * 50
        filename = "page-" + str(page) + ".html"
        fullurl = url + "&pn=" + str(pn)
        html = loadPage(fullurl, filename)
        writePage(html, filename)
    print("Done, thanks for using the spider")


if __name__ == "__main__":
    kw = input("Enter the name of the Tieba forum to crawl: ")
    beginPage = int(input("Enter the first page: "))
    endPage = int(input("Enter the last page: "))

    url = "http://tieba.baidu.com/f?"
    key = parse.urlencode({"kw": kw})
    fullurl = url + key
    tiebaSpider(fullurl, beginPage, endPage)
```
```python
import urllib.request
from urllib import parse

# Target URL for the POST request
url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null"
headers = {"User-Agent": "Mozilla...."}

# formdata holds the POST parameters
formdata = {
    "type": "AUTO",
    "i": "i love python",
    "doctype": "json",
    "xmlVersion": "1.8",
    "keyfrom": "fanyi.web",
    "ue": "UTF-8",
    "action": "FY_BY_ENTER",
    "typoResult": "true"
}

# Parameters submitted via POST must be bytes
data = bytes(parse.urlencode(formdata), encoding='utf-8')

request = urllib.request.Request(url, data=data, headers=headers)
response = urllib.request.urlopen(request)
print(response.read())
```
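Because the form data asks for doctype=json, the body that comes back should be JSON text. A small sketch of decoding it instead of printing the raw bytes (the exact fields depend on the API; note that read() can only be called once per response, so this replaces the final print above):

```python
import json

body = response.read().decode('utf-8')  # bytes -> str
result = json.loads(body)               # str -> dict
print(result)
```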
```python
import urllib.request
from urllib import parse

url = "https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}

formdata = {
    "start": "0",
    "limit": "20"
}

data = bytes(parse.urlencode(formdata), encoding='utf-8')
request = urllib.request.Request(url, data=data, headers=headers)
print(urllib.request.urlopen(request).read())
```
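Note that it is the presence of the data argument that makes urllib send these requests as POST; build the same Request without data and it goes out as a plain GET.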
Original post: https://www.cnblogs.com/weihu/p/8983225.html