# Scraping video data from pearvideo.com
import requests
from lxml import etree
import re

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
url = 'https://www.pearvideo.com/category_1'
page_text = requests.get(url, headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//*[@id="listvideoListUl"]/li')
for li in li_list:
    detail_url = 'https://www.pearvideo.com/' + li.xpath('./div/a/@href')[0]
    title = li.xpath('./div/a/div[2]/text()')[0] + '.mp4'
    detail_page_text = requests.get(detail_url, headers=headers).text
    # The video URL is generated dynamically by JS code, so it can only be extracted with a regex
    ex = 'srcUrl="(.*?)",vdoUrl'
    video_url = re.findall(ex, detail_page_text, re.S)[0]
    video_data = requests.get(video_url, headers=headers).content
    with open(title, 'wb') as fp:
        fp.write(video_data)
# Serial downloads: baseline timing
import time
from time import sleep

start = time.time()
urls = [
    'www.a.com',
    'www.b.com',
    'www.c.com',
]
def get_request(url):
    print('Downloading:', url)
    sleep(2)
    print('Download finished:', url)
for url in urls:
    get_request(url)
print('Total time:', time.time() - start)  # 6.0057127475738525s
# Thread-pool version
from multiprocessing.dummy import Pool

start = time.time()
# Open a pool of three threads
pool = Pool(3)
# map takes two arguments: a function and a list.
# It applies the function to every element of the list.
pool.map(get_request, urls)
print('Total time:', time.time() - start)  # 2.0103282928466797s
# A small Flask server used to test concurrent requests
from flask import Flask
from time import sleep

app = Flask(__name__)

@app.route('/1')
def index_bobo():
    sleep(2)
    return 'Hello 1'

@app.route('/2')
def index_jay():
    sleep(2)
    return 'Hello 2'

@app.route('/3')
def index_tom():
    sleep(2)
    return 'Hello 3'

if __name__ == '__main__':
    # threaded=True lets the dev server handle each request in its own thread
    app.run(threaded=True)
# Requesting the three routes with a thread pool
import requests

start = time.time()
urls = [
    'http://localhost:5000/1',
    'http://localhost:5000/2',
    'http://localhost:5000/3',
]
def get_request(url):
    page_text = requests.get(url).text
    print(page_text)
pool = Pool(3)
pool.map(get_request, urls)
print(time.time() - start)  # 4.036084413528442
Coroutines:
    Task object
    Event loop
    Callback function
        task.result(): receives the special function's return value (i.e. the return value of the special function behind the task object's coroutine object).
Execution order: start the event loop --> run a task object --> hit a blocking point --> switch to another task object --> whenever a task object finishes --> invoke that task object's callback.
aiohttp: a module that supports asynchronous network requests.
# Coroutine basics
import asyncio

def callback(task):
    # Callback attached to the task object;
    # task.result() receives the special function's return value: 'haha'
    print('i am callback and', task.result())

async def test():
    print('i am test()')
    return 'haha'

# c is a coroutine object
c = test()
c  # <coroutine object test at 0x0000018119ACAB48>
# Wrap the coroutine object into a task object
task = asyncio.ensure_future(c)
task.add_done_callback(callback)
# Create an event loop object
loop = asyncio.get_event_loop()
# Register the task object with the event loop
loop.run_until_complete(task)
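On Python 3.7+, asyncio.run() replaces the manual get_event_loop()/run_until_complete() pair; a minimal sketch of the same flow (the lambda is just an inline equivalent of the callback above):

import asyncio

async def test():
    print('i am test()')
    return 'haha'

async def main():
    # ensure_future must be called inside a running loop on newer Python versions
    task = asyncio.ensure_future(test())
    task.add_done_callback(lambda t: print('i am callback and', t.result()))
    await task

asyncio.run(main())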
# Multiple tasks
import asyncio
import time

start = time.time()
async def get_request(url):
    # Code from modules that do not support async must not appear
    # inside a special (async) function:
    # await time.sleep(2)  # wrong: time.sleep is blocking
    # await: wait for the blocking call to finish before running the code after it
    await asyncio.sleep(2)
    print('Download finished:', url)

urls = [
    'www.1.com',
    'www.2.com'
]
tasks = []
for url in urls:
    c = get_request(url)
    task = asyncio.ensure_future(c)
    tasks.append(task)
loop = asyncio.get_event_loop()
# loop.run_until_complete(asyncio.wait(tasks)): with multiple tasks, the task
# list must be suspended with asyncio.wait(); note this has to be done manually
loop.run_until_complete(asyncio.wait(tasks))
print(time.time() - start)  # 2.0003480911254883
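asyncio.gather() is an alternative to asyncio.wait(): it accepts coroutines directly and returns their results in order. A sketch under the same assumptions (Python 3.7+):

import asyncio
import time

async def get_request(url):
    await asyncio.sleep(2)
    return url

async def main():
    # gather wraps the coroutines into tasks and runs them concurrently
    results = await asyncio.gather(*(get_request(u) for u in ['www.1.com', 'www.2.com']))
    print(results)

start = time.time()
asyncio.run(main())
print(time.time() - start)  # ~2s, same as the asyncio.wait version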
# Applying coroutines to a crawler
import aiohttp
import time
import asyncio
from lxml import etree

s = time.time()
urls = [
    'http://127.0.0.1:5000/1',
    'http://127.0.0.1:5000/2'
]
# import requests
# async def get_request(url):
#     # requests cannot run asynchronously
#     page_text = requests.get(url).text
#     return page_text

# The special function: sends the request and captures the response data
async def get_request(url):
    # async with is a context manager, so the resources do not need to be
    # closed manually. ClientSession: instantiates a session object.
    async with aiohttp.ClientSession() as session:
        # get() takes almost the same parameters as the requests module, e.g.
        # session.get(url, headers=headers, proxy='http://ip:port', params=params);
        # the get call blocks, so it needs await
        async with await session.get(url=url) as response:
            # Reading the response data also needs await;
            # read() would return bytes instead of text
            page_text = await response.text()
            print(page_text)
            # Detail: put async before every with, and await before every blocking call
            return page_text

# Callback function
def parse(task):
    page_text = task.result()
    tree = etree.HTML(page_text)
    parse_data = tree.xpath('//text()')
    print(parse_data)

tasks = []
for url in urls:
    c = get_request(url)
    task = asyncio.ensure_future(c)
    task.add_done_callback(parse)
    tasks.append(task)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
print(time.time() - s)  # 2.0419085025787354
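The comment above mentions headers and a proxy; a sketch of how those options are passed to aiohttp per request (the User-Agent value and the proxy address are placeholders, not from the original):

import aiohttp
import asyncio

async def get_with_options(url):
    headers = {'User-Agent': 'Mozilla/5.0'}  # placeholder UA string
    async with aiohttp.ClientSession() as session:
        # aiohttp takes the proxy per request via the proxy= keyword
        async with session.get(url, headers=headers, proxy='http://ip:port') as response:
            return await response.text()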
selenium: a module for browser automation.
Relationship to crawling: it lets a crawler conveniently capture data that is dynamically loaded into the page.
Installing the module: pip install selenium -i https://pypi.tuna.tsinghua.edu.cn/simple
Installing the driver:
http://npm.taobao.org/mirrors/chromedriver/
This URL hosts the driver for Google Chrome; search for other browsers' drivers yourself.
Basic usage:
from selenium import webdriver
from time import sleep

# Instantiate a browser object; the argument is the path to your browser
# driver (the r prefix prevents character escaping in the path)
driver = webdriver.Chrome(executable_path=r'chromedriver.exe')
# If the browser is not the system default, set its location manually:
# options = webdriver.ChromeOptions()
# options.binary_location = r"path\to\Chrome.exe"
# browser = webdriver.Chrome(options=options)

# Open the Baidu page with get
driver.get('http://www.baidu.com')
# Locate Baidu's search input box
search_input = driver.find_element_by_id('kw')
# Type the query 雪景
search_input.send_keys('雪景')
sleep(2)
# driver.find_element_by_xpath(): locate by an XPath expression
# driver.find_element_by_name(): locate by the name attribute
# driver.find_element_by_class_name(): locate by class name
# Click the search button
driver.find_element_by_id('su').click()
# Wait 2 seconds for the page to navigate
sleep(2)
# Execute JS (JS injection): scroll down one screen height
driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
# Grab the full page source
page_text = driver.page_source
print(page_text)
# Close the browser
driver.quit()
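Note that Selenium 4.x deprecates the find_element_by_* helpers in favor of find_element(By..., ...); a minimal sketch of the same Baidu search in the newer style:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # Selenium 4 can fetch the driver itself via Selenium Manager
driver.get('http://www.baidu.com')
driver.find_element(By.ID, 'kw').send_keys('雪景')
driver.find_element(By.ID, 'su').click()
driver.quit()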
# Paginated scraping: click through pages and collect each page's source
from selenium import webdriver
from lxml import etree

driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('http://125.35.6.84:81/xk/')
sleep(1)
page_text = driver.page_source
page_text_list = [page_text]
for i in range(3):
    # Click the next-page button
    driver.find_element_by_id('pageIto_next').click()
    sleep(1)
    page_text_list.append(driver.page_source)
for page_text in page_text_list:
    tree = etree.HTML(page_text)
    li_list = tree.xpath('//ul[@id="gzlist"]/li')
    for li in li_list:
        title = li.xpath('./dl/@title')[0]
        num = li.xpath('./ol/@title')[0]
        print(title + ':' + num)
sleep(2)
driver.quit()
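The fixed sleep(1) calls above are fragile; an explicit wait retries until the element is actually ready. A sketch using WebDriverWait, assuming a driver is still open on the same page with the same pageIto_next button:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

wait = WebDriverWait(driver, 10)  # poll for up to 10 seconds
wait.until(EC.element_to_be_clickable((By.ID, 'pageIto_next'))).click()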
Action chains: a series of continuous actions. They require an import: from selenium.webdriver import ActionChains
If the tag you want to locate lives inside an iframe, you must first perform a fixed operation: driver.switch_to.frame('id'), where id is the iframe tag's id.
Basic action-chain usage:
from selenium.webdriver import ActionChains

driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
driver.switch_to.frame('iframeResult')
div_tag = driver.find_element_by_id('draggable')
# Dragging = clicking + sliding
# Instantiate an action-chain object, passing in the driver
action = ActionChains(driver)
action.click_and_hold(div_tag)
for i in range(5):
    # perform() makes the chain execute immediately; 17: horizontal offset, 5: vertical offset
    action.move_by_offset(17, 5).perform()
    # action.move_to_element(): move to a given tag
    sleep(0.5)
# Release the held mouse button (optional cleanup)
action.release()
sleep(3)
driver.quit()
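For a simple source-to-target drag, ActionChains also offers a one-shot drag_and_drop(); a sketch assuming the page above is still open and that the droppable target on the demo page has id="droppable":

target = driver.find_element_by_id('droppable')  # assumed target id on the demo page
ActionChains(driver).drag_and_drop(div_tag, target).perform()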
pip install Pillow -i https://pypi.tuna.tsinghua.edu.cn/simple
Installing Pillow also provides the PIL package (Pillow is the maintained fork of PIL).
# Chaojiying image-captcha recognition client
import requests
from hashlib import md5

class Chaojiying_Client(object):
    def __init__(self, username, password, soft_id):
        self.username = username
        password = password.encode('utf8')
        self.password = md5(password).hexdigest()
        self.soft_id = soft_id
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def PostPic(self, im, codetype):
        """
        im: image bytes
        codetype: captcha type, see http://www.chaojiying.com/price.html
        """
        params = {
            'codetype': codetype,
        }
        params.update(self.base_params)
        files = {'userfile': ('ccc.jpg', im)}
        r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, files=files, headers=self.headers)
        return r.json()

    def ReportError(self, im_id):
        """
        im_id: image ID of a misrecognized captcha
        """
        params = {
            'id': im_id,
        }
        params.update(self.base_params)
        r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php', data=params, headers=self.headers)
        return r.json()
# 12306 login: crop the click-captcha out of a full-page screenshot
from PIL import Image

driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://kyfw.12306.cn/otn/login/init')
sleep(5)
# Take a screenshot of the whole page
driver.save_screenshot('main.png')
# Locate the tag holding the captcha image
code_img_tag = driver.find_element_by_xpath('//*[@id="loginForm"]/div/ul[2]/li[4]/div/div/div[3]/img')
# Coordinates of the img's top-left corner
location = code_img_tag.location  # e.g. {'x': 274, 'y': 274}
# Size of the img
size = code_img_tag.size  # e.g. {'height': 190, 'width': 293}
# Crop box: top-left corner, then bottom-right corner
rangle = (int(location['x']), int(location['y']), int(location['x'] + size['width']), int(location['y'] + size['height']))
# Open the screenshot with PIL
i = Image.open('./main.png')
# Crop with Image.crop((left, upper, right, lower))
frame = i.crop(rangle)
# Save the cropped captcha
frame.save('code.png')
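One caveat (an environment assumption, not from the original): if OS display scaling or browser zoom is not 100%, the screenshot's pixels will not match element.location/size, and the crop box must be rescaled by the browser's devicePixelRatio:

ratio = driver.execute_script('return window.devicePixelRatio')
rangle = tuple(int(v * ratio) for v in rangle)  # rescale the crop box
frame = Image.open('./main.png').crop(rangle)
frame.save('code.png')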
def get_text(imgPath, imgType):
    chaojiying = Chaojiying_Client('chongxiao', 'chongxiao', '999123')
    im = open(imgPath, 'rb').read()
    return chaojiying.PostPic(im, imgType)['pic_str']

# Returns coordinates like '55,70|267,133', to be converted into [[55, 70], [267, 133]]
result = get_text('./code.png', 9004)
all_list = []
if '|' in result:
    list_1 = result.split('|')
    count_1 = len(list_1)
    for i in range(count_1):
        xy_list = []
        x = int(list_1[i].split(',')[0])
        y = int(list_1[i].split(',')[1])
        xy_list.append(x)
        xy_list.append(y)
        all_list.append(xy_list)
else:
    x = int(result.split(',')[0])
    y = int(result.split(',')[1])
    xy_list = []
    xy_list.append(x)
    xy_list.append(y)
    all_list.append(xy_list)
print(all_list)  # coordinates converted into the form [[55, 70], [267, 133]]
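The same parsing fits in one expression, since split('|') on a string without '|' simply yields a single-element list (an equivalent sketch):

all_list = [[int(x), int(y)] for x, y in (p.split(',') for p in result.split('|'))]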
# Click each returned position on the captcha with an action chain
for a in all_list:
    x = a[0]
    y = a[1]
    # Move to the offset within the captcha image and click
    ActionChains(driver).move_to_element_with_offset(code_img_tag, x, y).click().perform()
    sleep(1)
# Releasing too quickly can make the click fail, so we do not release here;
# alternatively, pause for a while before releasing
# ActionChains(driver).release()
driver.find_element_by_id('username').send_keys('123456')
sleep(1)
driver.find_element_by_id('password').send_keys('123456')
sleep(1)
driver.find_element_by_id('loginSub').click()
sleep(5)
driver.quit()
Headless browser: a browser with no visible UI, e.g. headless Chrome.
# Using headless Chrome
from selenium import webdriver
from time import sleep
# The Options class must be imported
from selenium.webdriver.chrome.options import Options

# Instantiate an options object
chrome_options = Options()
# These settings follow a fixed pattern
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# Pass the configured options into the browser object
driver = webdriver.Chrome(r'chromedriver.exe', options=chrome_options)
driver.get('https://www.cnblogs.com/')
print(driver.page_source)
Evading detection: sites can evaluate the JS expression window.navigator.webdriver. If it returns undefined, the request comes from a normal browser; if it returns true, the request was issued through selenium. A site's gateway can therefore inspect this value to decide whether a request is normal.
How to evade the check:
from selenium import webdriver
# The ChromeOptions class must be imported
from selenium.webdriver import ChromeOptions

option = ChromeOptions()
option.add_experimental_option('excludeSwitches', ['enable-automation'])
driver = webdriver.Chrome(executable_path=r'chromedriver.exe', options=option)
# Set the implicit wait to 20 seconds (must be called after the driver exists)
driver.implicitly_wait(time_to_wait=20)
driver.get('https://www.baidu.com/')
# print(driver.page_source)
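Newer Chrome versions still expose navigator.webdriver despite the excludeSwitches option; a common complement (a sketch, not from the original) overwrites the property via the Chrome DevTools Protocol, run right after creating the driver so it applies before any page script:

driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
    'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'
})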
fiddler is a packet-capture tool; in essence it is a proxy server.
Configuration: by default it only captures HTTP requests; to capture HTTPS requests, go to Tools --> Options --> HTTPS and install the certificate (check "Decrypt HTTPS traffic"). The certificate is an encryption credential.
Capturing mobile traffic: set the phone's proxy to ip:port, where ip is the PC's IPv4 address (cmd --> ipconfig --> IPv4 address), then open that address in the phone's browser and tap the link on the page to download the certificate.
Source: https://www.cnblogs.com/wby-110/p/13473114.html