标签:ring path java category 通过 查看 列表 清除 代理
# Import webdriver
from selenium import webdriver
# Keys provides the special-key constants used to simulate keyboard input
from selenium.webdriver.common.keys import Keys

# Create a browser object using the PhantomJS binary found via PATH
driver = webdriver.PhantomJS()
# If PhantomJS is not on PATH, point at the executable explicitly:
# driver = webdriver.PhantomJS(executable_path="./phantomjs")

# get() blocks until the page has fully loaded; tests often add time.sleep(2) here
driver.get("http://www.baidu.com/")

# Text content of the element whose id is "wrapper"
data = driver.find_element_by_id("wrapper").text
# Print the data (print() for Python 3, consistent with the rest of the file)
print(data)

# Page title: "百度一下,你就知道"
print(driver.title)

# Take a snapshot of the current page and save it
driver.save_screenshot("baidu.png")

# id="kw" is Baidu's search box; type the string "长城"
driver.find_element_by_id("kw").send_keys(u"长城")
# id="su" is the search button; click() simulates a mouse click
driver.find_element_by_id("su").click()

# Snapshot of the results page
driver.save_screenshot("长城.png")

# Page source after JS rendering
print(driver.page_source)

# Cookies for the current page
print(driver.get_cookies())

# Ctrl+A: select all content in the input box
driver.find_element_by_id("kw").send_keys(Keys.CONTROL, 'a')
# Ctrl+X: cut the selected content
driver.find_element_by_id("kw").send_keys(Keys.CONTROL, 'x')
# Type new content into the input box
driver.find_element_by_id("kw").send_keys("itcast")
# Simulate pressing Enter
driver.find_element_by_id("su").send_keys(Keys.RETURN)
# Clear the input box
driver.find_element_by_id("kw").clear()
# New page snapshot
driver.save_screenshot("itcast.png")
# Current URL
print(driver.current_url)
# close() closes the current window (quits the browser if it is the only one)
# driver.close()
# Quit the browser entirely
driver.quit()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
'''
Locator methods exposed by WebDriver (the singular form returns one
element; the *elements* form returns a list):
find_element_by_id
find_elements_by_name
find_elements_by_xpath
find_elements_by_link_text
find_elements_by_partial_link_text
find_elements_by_tag_name
find_elements_by_class_name
find_elements_by_css_selector
'''
# <div id="coolestWidgetEvah">...</div>
element = driver.find_element_by_id("coolestWidgetEvah")
# <div class="cheese"><span>Cheddar</span></div>
cheeses = driver.find_elements_by_class_name("cheese")
# <iframe src="..."></iframe>
frame = driver.find_element_by_tag_name("iframe")
# <input name="cheese" type="text"/>
cheese = driver.find_element_by_name("cheese")
# <a href="http://www.google.com/search?q=cheese">cheese</a> — locate by link text
cheese = driver.find_element_by_link_text("cheese")
# <div id="food"><span class="dairy">milk</span><span class="dairy aged">cheese</span></div>
cheese = driver.find_element_by_css_selector("#food span.dairy.aged")
# <input type="text" name="example" />
# <input type="text" name="other" />
inputs = driver.find_elements_by_xpath("//input")
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
driver.get('https://www.baidu.com/')
submitBtn = driver.find_element_by_id('su')
value = submitBtn.get_attribute("value")  # read the element's "value" attribute
driver.save_screenshot('baidu.png')
'''
Target markup for the Select examples below:
<select id="status" class="form-control valid" onchange="" name="status">
<option value=""></option>
<option value="0">未审核</option>
<option value="1">初审通过</option>
<option value="2">复审通过</option>
<option value="3">审核不通过</option>
</select>
'''
# Import the Select helper class
from selenium.webdriver.support.ui import Select
# Locate the <select> element by its name attribute
select = Select(driver.find_element_by_name('status'))
# Three equivalent ways to choose an option:
# - index: zero-based position of the <option>
# - value: the option's "value" attribute (NOT the text shown in the dropdown)
# - visible_text: the option's text, i.e. what the dropdown displays
select.select_by_index(1)
select.select_by_value("0")
select.select_by_visible_text(u"未审核")
# Deselect everything (only valid for multi-select elements)
select.deselect_all()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
# switch_to_alert() is deprecated; switch_to.alert is the supported API
alert = driver.switch_to.alert
driver.forward()  # browser-history forward
driver.back()     # browser-history back
#encoding: utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time

driver_path = r"D:\ProgramApp\chromedriver\chromedriver.exe"
driver = webdriver.Chrome(executable_path=driver_path)
driver.get('https://www.baidu.com/')

# Dump every cookie for the current domain
for cookie in driver.get_cookies():
    print(cookie)

# Fetch / delete a single cookie by name
print(driver.get_cookie("PSTM"))
driver.delete_cookie("PSTM")
# print(driver.get_cookie('PSTM'))

# Remove all cookies
driver.delete_all_cookies()
# for cookie in driver.get_cookies():
#     print("%s -> %s" % (cookie['name'], cookie['value']))
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
driver_path = r"D:\ProgramApp\chromedriver\chromedriver.exe"
options = webdriver.ChromeOptions()
# Route all browser traffic through an HTTP proxy
options.add_argument("--proxy-server=http://165.123.0.1:4555")
# Launch Chrome with the proxy options
# ("chrome_options=" is deprecated; "options=" is the supported keyword)
driver = webdriver.Chrome(executable_path=driver_path, options=options)
driver.get("http://httpbin.org/ip")
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
# Implicit wait: whenever an element is not immediately found, the driver
# keeps polling the DOM for up to the configured number of seconds
# before raising, instead of failing right away.
from selenium import webdriver
driver = webdriver.Chrome()
driver.implicitly_wait(10) # wait up to 10 seconds
driver.get("http://www.xxxxx.com/loading")
myDynamicElement = driver.find_element_by_id("content")
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
from selenium import webdriver
from selenium.webdriver.common.by import By
# WebDriverWait polls the page until a condition is met (explicit wait)
from selenium.webdriver.support.ui import WebDriverWait
# expected_conditions supplies ready-made wait conditions
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("http://www.xxxxx.com/loading")
try:
    # Block until the element with id="content" is present (10 s max)
    element = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "content"))
    )
finally:
    driver.quit()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
#encoding: utf-8
from selenium import webdriver
from lxml import etree
import re
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By


class tencentSpider(object):
    """Selenium-driven crawler for an ajax-rendered job-listing site.

    Walks every list page, opens each posting in a new browser tab,
    scrapes the posting into a dict, and accumulates the dicts in
    ``self.positions``.
    """

    driver_path = r"D:\software\chromedriver.exe"

    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=tencentSpider.driver_path)  # browser driver
        self.url = 'https://tencent.hr/jog'  # start page
        self.positions = []  # collected results

    def run(self):
        """Main loop: scrape list pages and follow pagination to the last page."""
        self.driver.get(self.url)
        while True:
            # Wait until the pagination widget has loaded before grabbing the
            # source, so the ajax content is complete
            WebDriverWait(driver=self.driver, timeout=10).until(
                EC.presence_of_element_located(
                    (By.XPATH, "//div[@class='pager_container']/span[last()]"))
            )
            source = self.driver.page_source  # ajax-rendered HTML
            # Parse the current list page
            self.parse_list_page(source)
            # Pagination
            try:
                # The "next page" button
                next_btn = self.driver.find_element_by_xpath(
                    "//div[@class='pager_container']/span[last()]")
                # A disabled "next" button marks the last page
                if "pager_next_disabled" in next_btn.get_attribute("class"):
                    break
                else:
                    next_btn.click()  # go to the next page
            except Exception:
                # Best-effort: dump the page source for debugging and retry
                # (narrowed from a bare except so Ctrl-C still works)
                print(source)
            time.sleep(1)

    def parse_list_page(self, source):
        """Extract detail-page links from a list page and visit each one."""
        html = etree.HTML(source)
        links = html.xpath("//a[@class='position_link']/@href")  # detail-page URLs
        for link in links:
            # Open each detail page in turn
            self.request_detail_page(link)
            time.sleep(1)

    def request_detail_page(self, url):
        """Open a detail page in a new tab, parse it, then return to the list tab."""
        # get() would navigate away from the list page, so open a new window instead
        self.driver.execute_script("window.open('%s')" % url)
        self.driver.switch_to.window(self.driver.window_handles[1])  # focus the new tab
        # Wait for the job title element; waiting on a text() node would time out
        WebDriverWait(self.driver, timeout=10).until(
            EC.presence_of_element_located(
                (By.XPATH, "//div[@class='job-name']/span[@class='name']"))
        )
        source = self.driver.page_source  # ajax-rendered HTML
        self.parse_detail_page(source)  # parse the detail page
        # Close the detail tab and switch back to the list tab
        self.driver.close()
        self.driver.switch_to.window(self.driver.window_handles[0])

    def parse_detail_page(self, source):
        """Scrape one job posting into a dict and append it to self.positions."""
        html = etree.HTML(source)
        position_name = html.xpath("//span[@class='name']/text()")[0]
        job_request_spans = html.xpath("//dd[@class='job_request']//span")
        salary = job_request_spans[0].xpath('.//text()')[0].strip()
        city = job_request_spans[1].xpath(".//text()")[0].strip()
        city = re.sub(r"[\s/]", "", city)  # drop whitespace and separator slashes
        work_years = job_request_spans[2].xpath(".//text()")[0].strip()
        work_years = re.sub(r"[\s/]", "", work_years)
        education = job_request_spans[3].xpath(".//text()")[0].strip()
        education = re.sub(r"[\s/]", "", education)
        desc = "".join(html.xpath("//dd[@class='job_bt']//text()")).strip()
        company_name = html.xpath("//h2[@class='fl']/text()")[0].strip()
        position = {
            'name': position_name,
            'company_name': company_name,
            'salary': salary,
            'city': city,
            'work_years': work_years,
            'education': education,
            'desc': desc
        }
        self.positions.append(position)
        print(position)
        print('=' * 40)


if __name__ == '__main__':
    spider = tencentSpider()
    spider.run()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
#encoding: utf-8
from selenium import webdriver
from lxml import etree
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
import csv
import pytesseract
from urllib import request
from PIL import Image
import re


class BossSpider(object):
    """Crawler for Boss Zhipin job listings.

    Scrapes every list page, opens each posting in a new tab, writes the
    scraped rows to ``boss.csv``, and solves image captchas with Tesseract
    OCR whenever the site interrupts crawling with one.
    """

    driver_path = r"D:\ProgramApp\chromedriver\chromedriver.exe"

    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=BossSpider.driver_path)
        # Path to the Tesseract binary used for captcha OCR
        pytesseract.pytesseract.tesseract_cmd = r'D:\ProgramApp\TesseractOCR\tesseract.exe'
        self.url = 'https://www.zhipin.com/c100010000/h_101250100/?query=python'
        self.domain = "https://www.zhipin.com"
        # Kept open for the lifetime of the process; the OS closes it on exit
        fp = open('boss.csv', 'a', newline='', encoding='utf-8')
        self.writer = csv.DictWriter(
            fp, ['name', 'company_name', 'salary', 'city', 'work_years', 'education', 'desc'])
        self.writer.writeheader()

    def run(self):
        """Main loop: solve captchas when shown, scrape pages, follow pagination."""
        self.driver.get(self.url)
        while True:
            # A captcha page interrupts crawling; solve it before continuing
            if len(self.driver.find_elements_by_id("captcha")) > 0:
                self.fill_captcha()
                time.sleep(2)
                continue
            source = self.driver.page_source
            self.parse_list_page(source)
            next_btn = self.driver.find_element_by_xpath("//a[contains(@class,'next')]")
            # A disabled "next" link marks the last page
            if "disabled" in next_btn.get_attribute('class'):
                break
            else:
                next_btn.click()

    def fill_captcha(self):
        """Download the captcha image, OCR it, and submit the recognised text."""
        captchaInput = self.driver.find_element_by_id("captcha")
        captchaImg = self.driver.find_element_by_class_name("code")
        submitBtn = self.driver.find_element_by_class_name('btn')
        src = captchaImg.get_attribute('src')
        request.urlretrieve(self.domain + src, 'captcha.png')
        image = Image.open('captcha.png')
        text = pytesseract.image_to_string(image)
        # Strip whitespace/slashes that OCR commonly mis-reads
        captcha = re.sub(r"[\s/]", "", text)
        captchaInput.send_keys(captcha)
        submitBtn.click()

    def parse_list_page(self, source):
        """Collect detail-page links from a list page and visit each one."""
        html = etree.HTML(source)
        links = html.xpath("//div[@class='info-primary']//a[position()=1]/@href")
        for link in links:
            url = self.domain + link
            self.request_detail_page(url)
            time.sleep(1)

    def request_detail_page(self, url):
        """Open a detail page in a new tab, parse it, then return to the list tab."""
        # get() would navigate away from the list page, so open a new window
        self.driver.execute_script("window.open('%s')" % url)
        self.driver.switch_to.window(self.driver.window_handles[1])
        source = self.driver.page_source
        self.parse_detail_page(source)
        # Close the detail tab and focus the list tab again
        self.driver.close()
        self.driver.switch_to.window(self.driver.window_handles[0])

    def parse_detail_page(self, source):
        """Scrape one posting into a dict and write it out."""
        html = etree.HTML(source)
        name = html.xpath("//div[@class='name']/text()")[0].strip()
        salary = html.xpath("//div[@class='name']/span[@class='badge']/text()")[0].strip()
        infos = html.xpath("//div[@class='job-primary']/div[@class='info-primary']/p//text()")
        city = infos[0]
        work_years = infos[1]
        education = infos[2]
        company_name = html.xpath("//a[@ka='job-detail-company']/text()")[0]
        desc = html.xpath("//div[@class='job-sec']/div[@class='text']//text()")
        desc = "\n".join(desc).strip()
        position = {
            'name': name,
            'company_name': company_name,
            'salary': salary,
            'city': city,
            'work_years': work_years,
            'education': education,
            'desc': desc
        }
        self.write_position(position)

    def write_position(self, position):
        """Write one row to the CSV and echo it to stdout."""
        self.writer.writerow(position)
        print(position)


if __name__ == '__main__':
    spider = BossSpider()
    spider.run()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
# -*- coding:utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


class db():
    """Log into a site with PhantomJS and dump the session cookies."""

    def __init__(self):
        self.url = "https://www.DB.com/"
        self.driver = webdriver.PhantomJS()

    def log_in(self):
        """Open the login page, submit credentials, and print the cookies."""
        self.driver.get(self.url)
        time.sleep(3)  # sleep 3 seconds so the page can finish loading
        self.driver.save_screenshot("0.jpg")
        # Type the account name
        self.driver.find_element_by_xpath('//*[@id="form_email"]').send_keys("xxxxx@qq.com")
        # Type the password
        self.driver.find_element_by_xpath('//*[@id="form_password"]').send_keys("xxxx")
        # Click the login button
        self.driver.find_element_by_class_name("bn-submit").click()
        time.sleep(2)
        self.driver.save_screenshot("db.jpg")
        # Print the cookies obtained after logging in
        print(self.driver.get_cookies())

    def __del__(self):
        '''Destructor: quit the browser automatically when the object is
        reclaimed — analogous to closing a file or a DB connection.
        '''
        self.driver.quit()


if __name__ == "__main__":
    db = db()    # instantiate
    db.log_in()  # then call the login method
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
#coding=utf-8
from selenium import webdriver
import json
import time


class dd:
    """Scrape live-room listings from an ajax-rendered directory page."""

    def __init__(self):
        # Request the front page once at construction time
        self.driver = webdriver.PhantomJS()
        self.driver.get("https://www.dd.com/directory/all")

    def get_content(self):
        """Return a list of dicts describing every room on the current page."""
        time.sleep(3)  # wait for the ajax content to finish loading
        li_list = self.driver.find_elements_by_xpath('//ul[@id="live-list-contentbox"]/li')
        contents = []
        for i in li_list:  # one <li> per room
            item = {}
            item["img"] = i.find_element_by_xpath("./a//img").get_attribute("src")  # room image
            item["title"] = i.find_element_by_xpath("./a").get_attribute("title")  # room title
            item["category"] = i.find_element_by_xpath("./a/div[@class='mes']/div/span").text  # category
            item["name"] = i.find_element_by_xpath("./a/div[@class='mes']/p/span[1]").text  # streamer name
            item["watch_num"] = i.find_element_by_xpath("./a/div[@class='mes']/p/span[2]").text  # viewer count
            print(item)
            contents.append(item)
        return contents

    def save_content(self, contents):
        """Append the scraped items to dd.txt as JSON, one object per line."""
        # Context manager guarantees the file is closed even on error
        with open("dd.txt", "a", encoding="utf-8") as f:
            for content in contents:
                json.dump(content, f, ensure_ascii=False, indent=2)
                f.write("\n")

    def run(self):
        # 1. front page was already requested in __init__
        # 2. scrape the first page
        contents = self.get_content()
        self.save_content(contents)
        # 3. keep clicking "next" while the button exists. find_elements
        #    returns an empty (falsy) list when the button is gone, whereas
        #    find_element would raise NoSuchElementException on the last page.
        while self.driver.find_elements_by_class_name("shark-pager-next"):
            # Click the next-page button
            self.driver.find_element_by_class_name("shark-pager-next").click()
            # 4. scrape the next page
            contents = self.get_content()
            # 4.1 save it
            self.save_content(contents)


if __name__ == "__main__":
    dd = dd()
    dd.run()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
# Open the start page
driver.get('https://www.baidu.com/')
# Run JS to open a second page in a new window
driver.execute_script("window.open('https://www.db.com/')")
# driver.window_handles lists the handles of all open windows, in the
# order the windows were opened. switch_to.window() selects one by handle
# (switch_to_window() is deprecated in favour of switch_to.window()).
driver.switch_to.window(driver.window_handles[1])  # pick a window by index
print(driver.current_url)   # URL of the now-current page
print(driver.page_source)   # rendered source of the now-current page
# Even though the new page opened on screen, the driver keeps pointing at
# the old window until switch_to.window() is called — so switch first,
# then scrape the new page.
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
#
from selenium import webdriver
import time

driver = webdriver.PhantomJS()
driver.get("https://movie.db.com/typerank?type_name=剧情&type=11&interval_id=100:90&action=")

# JS that scrolls the page body down by 10000 pixels
js = "document.body.scrollTop=10000"
# js = "var q=document.documentElement.scrollTop=10000"
time.sleep(3)
# Snapshot BEFORE scrolling
driver.save_screenshot("db_before.png")
# Execute the JS scroll
driver.execute_script(js)
time.sleep(10)
# Snapshot AFTER scrolling — written to a different file so the first
# snapshot is not overwritten and the two can be compared
driver.save_screenshot("db_after.png")
driver.quit()
————————————————
版权声明:本文为CSDN博主「DeltaTime」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/bua200720411091/article/details/93378461
标签:ring path java category 通过 查看 列表 清除 代理
原文地址:https://www.cnblogs.com/llflifei/p/11910711.html