Learning to use regular expressions
1. Use a regular expression to check whether an email address is well-formed.
import re

r = r"^(\w)+([-+_.]\w+)*@(\w)+((\.\w{2,4}){1,3})$"   # raw string avoids invalid-escape warnings
e = "757036111@qq.com"
if re.match(r, e):
    print(re.match(r, e).group(0))
else:
    print("error!")
2. Use a regular expression to pick out all the phone numbers.
import re

s = ("版权所有:广州商学院 地址:广州市黄埔区九龙大道206号"
     "学校学士办公室:020-82876130 学士招生电话:020-82872773"
     "学校硕士办公室:020-82876131 硕士招生电话:020-82872774"
     "粤公网安备 44011602000060号 粤ICP备15103669号")
# With two capture groups, findall returns (area code, number) tuples.
numbers = re.findall(r"(\d{3,4})-(\d{6,8})", s)
print(numbers)
3. Use a regular expression to split English text into words: re.split(r'[\s,.?\-]+', news)
import re

news = "ARE? YOU KIDDING ME ? NO, IM SERIOUS."
# Split on runs of whitespace and punctuation to get the words.
word = re.split(r"[\s,.?\-]+", news)
print(word)
4. Use a regular expression to extract the news ID.
import re

url = "http://news.gzcc.cn/html/2017/xiaoyuanxinwen_1225/8854.html"
# Take everything between the first "_" and ".html", then keep the last path segment.
newsId = re.findall(r"\_(.*).html", url)[0].split("/")[-1]
print(newsId)
5. Build the Request URL for the click count.
Rurl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newsId)
print(Rurl)
6. Fetch the click count.
import requests

res = requests.get("http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newsId))
# The API embeds the count in a JavaScript snippet; peel off the wrapper around the number.
print(int(res.text.split(".html")[-1].lstrip("('").rsplit("');")[0]))
7. Wrap steps 4, 5 and 6 into one function: def getClickCount(newsUrl):
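A minimal sketch of such a function, stitching together the regex from step 4, the URL template from step 5, and the response parsing from step 6; it assumes the click-count API keeps answering with the same JavaScript wrapper that step 6 peels apart:

import re
import requests

def getClickCount(newsUrl):
    # Step 4: pull the numeric news ID out of the article URL.
    newsId = re.search(r'\_(.*).html', newsUrl).group(1).split('/')[-1]
    # Step 5: build the click-count request URL from that ID.
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    # Step 6: strip the JavaScript wrapper and keep the number.
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))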
8. Wrap the news-detail scraping code into a function: def getNewDetail(newsUrl):
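A sketch under the assumption that newsUrl points at a single article's detail page (the full program in step 11 drives the same parsing from a list page instead); the #content and .show-info selectors are the ones used there:

import requests
from bs4 import BeautifulSoup
from datetime import datetime

def getNewDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    content = soupd.select('#content')[0].text
    info = soupd.select('.show-info')[0].text
    # The info line begins with "发布时间:YYYY-mm-dd HH:MM:SS".
    dt = datetime.strptime(info[0:24].lstrip('发布时间:'), '%Y-%m-%d %H:%M:%S')
    author = info[info.find('作者'):].split()[0].lstrip('作者:')
    print(dt, author)
    print(content)
    getClickCount(newsUrl)  # step 7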
9. Extract every news item from one list page, wrapped as a function: def getListPage(pageUrl):
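A sketch reusing the list-page selectors from the full program in step 11; it hands each detail link to getNewDetail from step 8:

import requests
from bs4 import BeautifulSoup

def getListPage(pageUrl):
    res = requests.get(pageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        # Only <li> elements that carry a title are real news items.
        if len(news.select('.news-list-title')) > 0:
            getNewDetail(news.select('a')[0].attrs['href'])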
10. Get the total number of news articles and work out the total page count, wrapped as a function: def getPageN():
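A sketch assuming, as the full program below does, that the index page's ".a1" element reads like "NNN条" (the total article count) and that each list page holds 10 items:

import requests
from bs4 import BeautifulSoup

def getPageN():
    res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    total = int(soup.select('.a1')[0].text.rstrip('条'))
    return (total + 9) // 10  # round up to whole pages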
11. Fetch the details of every news item on every list page.
import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime

url = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getClickCount(newsUrl):
    # Step 4: extract the news ID from the article URL.
    newId = re.search(r'\_(.*).html', newsUrl).group(1).split('/')[-1]
    # Step 5: build the click-count API URL from that ID.
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    # Step 6: strip the JavaScript wrapper around the returned number.
    rest = requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');")
    print("新闻编号:", newId)
    print("新闻点击次数URL:", clickUrl)
    print("新闻点击次数:", rest)
def getNewDetail(Url):
    # Fetch the list page and walk its news items.
    res = requests.get(Url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            t1 = news.select('.news-list-title')[0].text
            d1 = news.select('.news-list-description')[0].text
            a1 = news.select('a')[0].attrs['href']
            # Fetch this article's detail page once and parse everything from it.
            resd = requests.get(a1)
            resd.encoding = 'utf-8'
            soupd = BeautifulSoup(resd.text, 'html.parser')
            c1 = soupd.select('#content')[0].text
            info = soupd.select('.show-info')[0].text
            print("新闻标题:", t1)
            print("新闻链接:", a1)
            print("新闻详情:", c1)
            # The info line starts with "发布时间:YYYY-mm-dd HH:MM:SS".
            t = info[0:24].lstrip('发布时间:')
            dt = datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
            print("新闻发布时间:", dt)
            author = info[info.find('作者'):].split()[0].lstrip('作者:')
            fromwhere = info[info.find('来源'):].split()[0].lstrip('来源:')
            photo = info[info.find('摄影'):].split()[0].lstrip('摄影:')
            print("新闻作者:", author)
            print("新闻来源:", fromwhere)
            print("新闻摄影:", photo)
            getClickCount(a1)
def getPage(url):
    # Total article count from the ".a1" element ("NNN条"); 10 items per page, rounded up.
    return (int(soup.select('.a1')[0].text.rstrip('条')) + 9) // 10
def getlist(url):
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for i in soup.select('li'):
        if len(i.select('.news-list-title')) > 0:
            place = i.select('.news-list-info')[0].contents[1].text  # source
            title = i.select('.news-list-title')[0].text  # title
            description = i.select('.news-list-description')[0].text  # description
            detailurl = i.select('a')[0].attrs['href']  # detail link
            print("来源:" + place)
            print("新闻标题:" + title)
            print("新闻描述:" + description)
            print("新闻链接:" + detailurl)
def getall(url):
    # Page 1 is the index page itself; pages 2..N follow the numbered URL pattern.
    for num in range(2, getPage(url) + 1):
        listpageurl = "http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html".format(num)
        getlist(listpageurl)
        getNewDetail(listpageurl)

getall(url)
Original post: https://www.cnblogs.com/lmq757036131/p/8798417.html