from selenium import webdriver
import time

browser = webdriver.Chrome()
browser.get('https://www.weibo.com')
time.sleep(10)
# fill in the username and password, then click the login button
browser.find_element_by_css_selector("#loginname").send_keys("13880576568")
browser.find_element_by_css_selector(".info_list.password input[node-type='password']").send_keys("shiyan823")
browser.find_element_by_css_selector(".info_list.login_btn a[node-type='submitBtn']").click()
# scroll to the bottom of the page to trigger loading of more content
for i in range(3):
    browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
    time.sleep(3)
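The fixed time.sleep(10) above simply gives the login form time to render. An explicit wait is a less brittle alternative; the sketch below is not part of the original script and only assumes the same #loginname selector:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get('https://www.weibo.com')
# wait up to 15 seconds for the login box to appear instead of sleeping a fixed 10 seconds
WebDriverWait(browser, 15).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, "#loginname"))
)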
from selenium import webdriver

# disable image loading in Chrome to speed up page loads
chrome_opt = webdriver.ChromeOptions()
prefs = {"profile.managed_default_content_settings.images": 2}
chrome_opt.add_experimental_option("prefs", prefs)
browser = webdriver.Chrome(chrome_options=chrome_opt)
browser.get("https://www.taobao.com")
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from selenium import webdriver

# the two methods below go inside the spider class
    def __init__(self):
        # open one shared Chrome instance per spider instead of one per request
        self.browser = webdriver.Chrome()
        super().__init__()
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def spider_closed(self, spider):
        # close Chrome when the spider exits
        print("spider closed")
        self.browser.quit()
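For context, here is a minimal sketch of how those two methods slot into a spider class; the spider name jobbole matches the middleware check below, while the start URL and empty parse method are placeholders:

import scrapy
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from selenium import webdriver

class JobboleSpider(scrapy.Spider):
    name = "jobbole"
    start_urls = ["http://blog.jobbole.com/all-posts/"]   # placeholder start URL

    def __init__(self):
        # one shared Chrome instance for the whole spider
        self.browser = webdriver.Chrome()
        super().__init__()
        # register a callback so Chrome is closed when the spider finishes
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def spider_closed(self, spider):
        print("spider closed")
        self.browser.quit()

    def parse(self, response):
        pass  # extract data from the Selenium-rendered response here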
from selenium import webdriver
from scrapy.http import HtmlResponse

class JSPageMiddleware(object):
    # render dynamic pages through Chrome instead of Scrapy's default downloader
    def process_request(self, request, spider):
        if spider.name == "jobbole":
            # browser = webdriver.Chrome()
            spider.browser.get(request.url)
            import time
            time.sleep(3)
            print("Visiting: {0}".format(request.url))
            # returning an HtmlResponse here stops Scrapy from downloading the page again
            return HtmlResponse(url=spider.browser.current_url, body=spider.browser.page_source,
                                encoding="utf-8", request=request)
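The middleware only takes effect once it is registered in settings.py; the dotted path below is an assumption about the project layout and should point at wherever JSPageMiddleware actually lives:

# settings.py -- adjust the dotted path to your own project
DOWNLOADER_MIDDLEWARES = {
    "ArticleSpider.middlewares.JSPageMiddleware": 1,
}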
pip install pyvirtualdisplay
pip install xvfbwrapper
apt-get install xvfb

from pyvirtualdisplay import Display
from selenium import webdriver

# start a virtual X display so Chrome can run on a Linux server with no GUI
display = Display(visible=0, size=(800, 600))
display.start()
browser = webdriver.Chrome()
browser.get("https://www.taobao.com")   # any target page; URL reused from the earlier example
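Once the crawl is done, the browser and the virtual display should both be shut down, browser first; a minimal sketch assuming the same setup as above:

from pyvirtualdisplay import Display
from selenium import webdriver

display = Display(visible=0, size=(800, 600))
display.start()
browser = webdriver.Chrome()
browser.get("https://www.taobao.com")
print(browser.title)
browser.quit()     # close Chrome first
display.stop()     # then tear down the virtual display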
Original source: https://www.cnblogs.com/regit/p/9718924.html