from datetime import datetime
import requests
from bs4 import BeautifulSoup

# Fetch the campus news list page
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

# Fetch a news detail page and return its body text
def getdetail(htm):
    resd = requests.get(htm)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    return soupd.select('.show-content')[0].text

# Walk the list items: print link, title, date, source, then the full article text
for news in soup.select('li'):
    if len(news.select('.news-list-title')) > 0:
        htm = news.select('a')[0]['href']
        print(news.select('a')[0]['href'])
        print(news.select('.news-list-title')[0].text)
        time = news.select('.news-list-info')[0].contents[0].text
        print(datetime.strptime(time, '%Y-%m-%d'))
        print(news.select('.news-list-info')[0].contents[1].text)
        detail = getdetail(htm)
        print(detail)
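For reuse, the same extraction logic can be collected into a function that returns structured records instead of printing. The sketch below is a minimal variant of the loop above; the function name get_news_list is just an illustrative choice, and it assumes the same gzcc page structure and CSS selectors (.news-list-title, .news-list-info) as the code above.

from datetime import datetime
import requests
from bs4 import BeautifulSoup

# Minimal sketch: gather each news item into a dict instead of printing.
# Assumes the same gzcc list-page structure and selectors as above.
def get_news_list(list_url):
    res = requests.get(list_url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    items = []
    for news in soup.select('li'):
        if news.select('.news-list-title'):
            info = news.select('.news-list-info')[0]
            items.append({
                'url': news.select('a')[0]['href'],
                'title': news.select('.news-list-title')[0].text,
                'date': datetime.strptime(info.contents[0].text, '%Y-%m-%d'),
                'source': info.contents[1].text,
            })
    return items

if __name__ == '__main__':
    for item in get_news_list('http://news.gzcc.cn/html/xiaoyuanxinwen/'):
        print(item['date'], item['title'], item['url'])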
import requests
from bs4 import BeautifulSoup

# Fetch the QQ military news index; the page is encoded as gb2312
url = 'http://mil.qq.com/mil_index.htm'
res = requests.get(url)
res.encoding = 'gb2312'
soup = BeautifulSoup(res.text, 'html.parser')

# Walk each news block: print link, title, source, and the comment-count widget text
for news in soup.select('.Q-tpList'):
    if len(news.select('.Q-tpWrap')) > 0:
        htm = news.select('a')[0]['href']
        print(news.select('a')[0]['href'])
        print(news.select('.linkto')[0].text)
        print(news.select('.from')[0].text)
        print(news.select('.discuzBtn')[0].text)
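Here the encoding is hard-coded as gb2312 because the QQ page declares it. When the encoding of a target page is not known in advance, requests can guess it from the response body via apparent_encoding; a small sketch, reusing the same URL as above:

import requests
from bs4 import BeautifulSoup

res = requests.get('http://mil.qq.com/mil_index.htm')
# apparent_encoding is detected from the response body, which helps
# when the header-declared encoding is missing or wrong.
res.encoding = res.apparent_encoding
soup = BeautifulSoup(res.text, 'html.parser')
print(soup.title.text if soup.title else 'no <title> found')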
Crawling a news list with the requests and BeautifulSoup4 libraries
Original post: http://www.cnblogs.com/wlh353/p/7609207.html