Splitting with regular expressions
import re

# 1. Split a string, using 's' as the delimiter
one = 'asdsfsgsh'
pattern = re.compile('s')
result = pattern.split(one)
# print(result)  # ['a', 'd', 'f', 'g', 'h']

# 2. Match Chinese characters: in Python they fall in the Unicode range
#    [\u4e00-\u9fa5] (the CJK analogue of [a-z]); quantifiers * + ? work as usual
two = '<a href="https://www.baidu.com/" nslog="normal" nslog-type="10600112" data-href="https://www.baidu.com/s?ie=utf-8&fr=bks0000&wd=">网页是最新版本的,适配移动端</a>'
pattern = re.compile('[\u4e00-\u9fa5]+')
result = pattern.findall(two)
print(result)  # ['网页是最新版本的', '适配移动端']
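For a one-off split you can skip the compile step and pass the pattern straight to re.split; it also accepts a maxsplit cap. A minimal sketch (the input strings here are made up for illustration):

import re

# The pattern can be passed directly without compiling first
print(re.split(r'\d+', 'one1two22three'))      # ['one', 'two', 'three']
# maxsplit caps how many splits happen
print(re.split('s', 'asdsfsgsh', maxsplit=2))  # ['a', 'd', 'fsgsh']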
import re
import requests

url = 'http://news.baidu.com/'
headers = {
    "User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
}

# response.text guesses the encoding, which is not always right;
# decode the raw bytes explicitly instead
data = requests.get(url, headers=headers).content.decode()

# Use a regex to pull out each news item's title and URL, e.g.:
# <a href="http://news.cnr.cn/native/gd/20181028/t20181028_524397644.shtml" target="_blank" mon="r=1">民营经济再吃定心丸,民企当体会怎样深意</a>
pattern = re.compile('<a href="(.*?)" target="_blank" mon="(.*?)">(.*?)</a>')
# pattern = re.compile('<a (.*?)</a>', re.S)
result = pattern.findall(data)
print(result)

# with open('02news.html', 'w') as f:
#     f.write(data)
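Named groups can make the same extraction self-documenting, since each field is fetched by name instead of tuple position. A minimal sketch against the pattern above (the group names url/mon/title and the sample anchor are my own):

import re

pattern = re.compile(r'<a href="(?P<url>.*?)" target="_blank" mon="(?P<mon>.*?)">(?P<title>.*?)</a>')
sample = '<a href="http://example.com/1.shtml" target="_blank" mon="r=1">示例标题</a>'
for m in pattern.finditer(sample):
    # Fields are addressed by name, so reordering groups cannot break callers
    print(m.group('title'), m.group('url'))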
XPath parsing
import requests
# lxml is a parsing library for HTML and XML: pip install lxml
from lxml import etree

url = 'http://news.baidu.com/'
headers = {
    "User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
}

# response.text guesses the encoding; decode the raw bytes explicitly
data = requests.get(url, headers=headers).content.decode()

# 1. Convert to a parseable type
xpath_data = etree.HTML(data)

# XPath syntax:
#   1. child node: /
#   2. any descendant: //
#   3. exact tag by attribute: //a[@attr="value"]
#   4. text wrapped by a tag: text()
#   5. attribute value: @href
# xpath() always returns a list

# 2. Call the xpath method (each call below overwrites result)
result = xpath_data.xpath('/html/head/title//text()')
result = xpath_data.xpath('//a/text()')
result = xpath_data.xpath('//a[@mon="ct=1&a=2&c=top&pn=18"]/text()')
result = xpath_data.xpath('//a[@mon="ct=1&a=2&c=top&pn=18"]/@href')
result = xpath_data.xpath('//li/a/text()')
print(result)

# with open('02news.html', 'w') as f:
#     f.write(data)
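An xpath() query can also be run relative to an element node, which is handy when looping over repeated blocks and pairing up fields from the same item. A minimal sketch with made-up markup:

from lxml import etree

html = etree.HTML('<ul><li><a href="/a">first</a></li><li><a href="/b">second</a></li></ul>')
# Iterate over element nodes, then query relative to each node with './'
for li in html.xpath('//li'):
    print(li.xpath('./a/text()'), li.xpath('./a/@href'))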
Caveats
from lxml import etree

html = """
<html>
  <body>
    <ul>
      <li>1
        <a href="">子</a>
      </li>
      <li>2
        <a href="">子</a>
      </li>
      <li>3
        <a href="">子</a>
      </li>
      <li>4
        <a href="">子</a>
      </li>
      <li>5
        <a href="">子</a>
      </li>
    </ul>
  </body>
</html>
"""

# 1. Convert to a parseable type
x_data = etree.HTML(html)

# 2. XPath indices start at 1, and an index only counts siblings under the
#    same parent (each call below overwrites result)
result = x_data.xpath('//li[5]/text()')
result = x_data.xpath('/html/body/ul/li/a/text()')
# //a[2] is empty here: no <li> contains a second <a>
result = x_data.xpath('//a[2]')
print(result)
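If you do want the second <a> in the whole document rather than within each parent, wrap the path in parentheses so the index applies to the collected result set. A minimal sketch with made-up markup:

from lxml import etree

x_data = etree.HTML('<ul><li><a>one</a></li><li><a>two</a></li></ul>')
print(x_data.xpath('//a[2]'))           # [] -- no <li> has a second <a>
print(x_data.xpath('(//a)[2]/text()'))  # ['two'] -- index the full result set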
Complete example
import requests
from lxml import etree
import json


class BtcSpider(object):
    def __init__(self):
        self.base_url = 'http://8btc.com/forum-61-'
        self.headers = {
            "User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
        }
        self.data_list = []

    # 1. Send the request
    def get_response(self, url):
        response = requests.get(url, headers=self.headers)
        # Is the page encoded as gbk or utf-8? Check <meta charset=""> in <head>.
        # If the page was served as gbk, it must also be decoded as gbk:
        # data = response.content.decode('gbk')
        data = response.content
        return data

    # 2. Parse the data
    def parse_data(self, data):
        # Use XPath to collect every news title and URL on the current page
        # 1. Convert to a parseable type
        x_data = etree.HTML(data)
        # 2. Build the XPath: either write it by hand, or right-click in the
        #    browser, copy the XPath, and adjust it as needed
        title_list = x_data.xpath('//a[@class="s xst"]/text()')
        # title_list = x_data.xpath('//form[@id="moderate"]/div/div[2]/div/a[@class="s xst"]/text()')
        url_list = x_data.xpath('//a[@class="s xst"]/@href')

        for index, title in enumerate(title_list):
            news = {}
            news['name'] = title
            news['url'] = url_list[index]
            self.data_list.append(news)

    # 3. Save the data
    def save_data(self):
        # Serialize the list to a JSON string
        data_str = json.dumps(self.data_list)
        with open('05btc.json', 'w') as f:
            f.write(data_str)

    # 4. Run
    def run(self):
        for i in range(1, 5):
            # 1. Build the full page URL
            url = self.base_url + str(i) + '.html'
            print(url)
            # 2. Send the request
            data = self.get_response(url)
            # 3. Parse
            self.parse_data(data)
        # 4. Save once after all pages are collected
        self.save_data()


BtcSpider().run()
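One caveat with save_data: json.dumps escapes non-ASCII characters by default, so Chinese titles land in 05btc.json as \uXXXX sequences. A minimal sketch of the alternative, writing readable UTF-8 instead (the sample record and URL are made up):

import json

data_list = [{'name': '示例标题', 'url': 'http://example.com/t1'}]

# ensure_ascii=False keeps Chinese characters readable in the output file;
# open the file with an explicit UTF-8 encoding to match
with open('05btc.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(data_list, ensure_ascii=False))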
Original article: https://www.cnblogs.com/sunBinary/p/10624913.html