标签:获取网页 爬取 concat 技术 soup turn nbsp lis split()
import requests
from bs4 import BeautifulSoup
import pandas as pd


def Get_data(url):
    """Scrape one month of Beijing weather history from tianqihoubao.com.

    Parameters
    ----------
    url : str
        URL of a monthly history page, e.g.
        ``http://www.tianqihoubao.com/lishi/beijing/month/201909.html``.

    Returns
    -------
    pandas.DataFrame
        One row per day with columns '日期' (date), '温度' (temperature
        range) and '天气情况' (weather conditions).
    """
    res = requests.get(url)
    # The site serves GBK-encoded pages, so decode the raw bytes explicitly
    # instead of trusting requests' charset guess.
    html = res.content.decode('gbk')
    soup = BeautifulSoup(html, 'html.parser')

    # Every data row of the history table is a <tr>; the first <tr> is the
    # column-header row, so it is skipped below.
    rows = soup.find_all('tr')

    dates = []
    temps = []
    conditions = []
    for row in rows[1:]:
        # str.split() with no argument collapses the cell text's mixed
        # whitespace / &nbsp; noise into clean tokens:
        #   tokens[0]    -> date
        #   tokens[1:3]  -> day/night weather description
        #   tokens[3:6]  -> high / low temperature (e.g. '26℃', '/', '14℃')
        tokens = row.text.split()
        dates.append(tokens[0])
        temps.append(''.join(tokens[3:6]))
        conditions.append(''.join(tokens[1:3]))

    table = pd.DataFrame()
    table['日期'] = dates
    table['温度'] = temps
    table['天气情况'] = conditions
    return table


def main():
    """Fetch Sep-Nov 2019, concatenate the months, and save BeiJing.csv."""
    months = [
        Get_data('http://www.tianqihoubao.com/lishi/beijing/month/2019%s.html' % mm)
        for mm in ('09', '10', '11')
    ]
    # Stack the three monthly tables and renumber the index 0..N-1.
    combined = pd.concat(months).reset_index(drop=True)
    combined.to_csv('BeiJing.csv', index=False, encoding='utf-8')


# Guard the scrape behind a main-check so importing this module does not
# trigger network requests or overwrite BeiJing.csv.
if __name__ == '__main__':
    main()
标签:获取网页 爬取 concat 技术 soup turn nbsp lis split()
原文地址:https://www.cnblogs.com/eddycomeon/p/11972188.html