
Extracting click counts with regular expressions, and refactoring into functions


Learn to use regular expressions:

1. Use a regular expression to check that an email address is well formed.

2. Use a regular expression to find all phone numbers.

3. Use a regular expression to split English text into words with re.split().

4. Use a regular expression to extract the news ID.

5. Build the Request URL for the click count.

6. Fetch the click count.

7. Wrap steps 4-6 into a function: def getClickCount(newsUrl):

8. Wrap the news-detail scraping code into a function: def getNewDetail(newsUrl):

9. Extract all news items from one list page, wrapped as def getListPage(pageUrl):

10. Get the total number of articles and compute the total page count, wrapped as def getPageN():

11. Fetch the details of every news item on every list page.

 

import requests
import re
from bs4 import BeautifulSoup

url = 'http://oa.gzcc.cn/api.php?op=count&id=9183&modelid=80'
newsUrl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
res = requests.get(url)
res.encoding = 'utf-8'
# The count API answers with a snippet of JavaScript; the click count is the
# argument of the final .html('...') call, so split on '.html' and strip the
# surrounding punctuation.
r = int(res.text.split('.html')[-1].lstrip("('").rstrip("');"))
print(r)
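Since the point of the exercise is regular expressions, the same number can also be pulled out with one pattern instead of chained split/strip calls; a minimal sketch, assuming the response embeds the count as the last .html('NNN') call:

nums = re.findall(r"\.html\('(\d+)'\)", res.text)
if nums:
    print(int(nums[-1]))  # the last .html('...') argument, mirroring the split() version above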

# Three equivalent ways to pull the news ID out of the URL:
a = re.match(r'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_(.*)\.html', newsUrl).group(1).split('/')[-1]
print(a)

b = re.search(r'_(.*)\.html', newsUrl).group(1).split('/')[-1]
print(b)

c = re.findall(r'_(.*)\.html', newsUrl)[0].split('/')[-1]
print(c)

# 1. Use a regular expression to check that an email address is well formed.
mailbox = r'^(\w)+(\.\w+)*@(\w)+((\.\w{2,3}){1,3})$'
e = '2647409627@qq.com'
m = re.match(mailbox, e)
if m:
    print(m.group(0))
else:
    print('error')
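To see what the pattern accepts and rejects, it can be run over a handful of sample addresses (the samples below are made up for illustration):

samples = ['user.name@example.com', 'a@b.co.uk', 'bad@@example.com', 'no-at-sign.com']
for s in samples:
    print(s, '->', 'valid' if re.match(mailbox, s) else 'invalid')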


# 2. Use a regular expression to find all phone numbers.
newsNum = '''版权所有:广州商学院   地址:广州市黄埔区九龙大道206号
学校办公室:020-82876130   招生电话:020-82872773
粤公网安备 44011602000060号    粤ICP备15103669号'''
tel = re.findall(r'(\d{3,4})-(\d{6,8})', newsNum)
print(tel)
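Because the pattern contains two capture groups, re.findall returns (area code, number) tuples rather than whole matches; joining each tuple restores the full numbers:

numbers = ['-'.join(t) for t in tel]
print(numbers)  # ['020-82876130', '020-82872773']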

# 3. Use a regular expression to split the English text into words.
news = '''The undersea vehicle, which is capable of diving to 4,500m, made its 50th dive on April 6 in the Indian Ocean, a part of China's 49th ocean expedition. Of the 50 dives, 35 were in the Indian Ocean and 15 in the South China Sea, according to China Ocean Mineral Resources R&D Association. In the southwest Indian Ocean alone, Qianlong II has traveled more than 2,000 km, they said. The submersible, which first went underwater in 2015, is used for exploring deep-sea mineral resources. "Its operations have become more stable after 50 dives," said Xu Chunhui, a scientist tasked with equipping the submersible. Xu said a part of the upgrade will allow the submersible to work without the presence of its mother vessel. A new unmanned monitoring device will track the submersible, freeing the mother vessel for other activities.'''
englishNews = re.split(r'[\s,.!?":;]', news)
print(englishNews)
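re.split leaves an empty string wherever two delimiters sit next to each other (a comma followed by a space, for example), so the raw result needs filtering:

words = [w for w in englishNews if w]
print(words)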

# 4. Use a regular expression to extract the news ID.
# newsUrl = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'
newId = re.search(r'_(.*)\.html', newsUrl).group(1).split('/')[-1]
print(newId)


# 5. Build the Request URL for the click count.
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
print(clickUrl)

# 6. Fetch the click count.
count = int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))
print(count)

# 7. Wrap steps 4-6 into a function: def getClickCount(newsUrl):
def getClickCount(newsUrl):
    newId = re.search(r'_(.*)\.html', newsUrl).group(1).split('/')[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    # Return an int so callers can print or sum the count directly.
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))

counts = getClickCount(newsUrl)
print(counts)
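The function above trusts both the URL and the API response; a slightly more defensive variant (a hypothetical helper, not part of the original assignment) returns 0 instead of raising when either does not match the expected shape:

def getClickCountSafe(newsUrl):
    m = re.search(r'_(.*)\.html', newsUrl)
    if not m:
        return 0  # the URL does not look like a news detail page
    newId = m.group(1).split('/')[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newId)
    nums = re.findall(r"\.html\('(\d+)'\)", requests.get(clickUrl).text)
    return int(nums[-1]) if nums else 0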

# 8. Wrap the news-detail scraping code into a function: def getNewDetail(newsUrl):
def getNewDetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    # Title, time and source are read from the detail page itself, so the
    # function no longer depends on the caller's loop variable (assumes the
    # detail page uses .show-title / .show-info; adjust the selectors and
    # label parsing if the markup differs).
    ti = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    tim = info.lstrip('发布时间:')[:19]
    source = info[info.find('来源:'):].split()[0].lstrip('来源:')
    # Click count
    clickCount = getClickCount(url)
    print('标题:' + ti + ' 时间:' + tim + ' 来源:' + source + ' 点击次数:' + str(clickCount) + ' 链接: ' + url)
    # Body text
    print('正文:')
    content = soupd.select('#content')[0].text.split()
    for c in content:
        print(c)

newsUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(newsUrl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
for new in soup.select('.news-list')[0].select('li'):
    # Link to the detail page
    a = new.select('a')[0].attrs['href']
    getNewDetail(a)

# 9. Extract all news items from one list page, wrapped as def getListPage(pageUrl):
def getListPage(pageUrl):
    res = requests.get(pageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            a = news.select('a')[0].attrs['href']
            getNewDetail(a)

res = requests.get(newsUrl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# .a1 holds the total article count (text like '1234条'); ten articles per page.
n = int(soup.select('.a1')[0].text.rstrip('条'))
pages = n // 10 + 1

# Demo: crawl just the last list page.
for i in range(pages, pages + 1):
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)

# 10. Get the total number of articles and compute the total page count, wrapped as def getPageN():
def getPageN():
    res = requests.get(newsUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    n = int(soup.select('.a1')[0].text.rstrip('条'))
    return n // 10 + 1
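Note that n // 10 + 1 over-counts by one page whenever n is an exact multiple of 10; a ceiling division avoids that edge case:

def pageCount(n, perPage=10):
    # Equivalent to math.ceil(n / perPage) using integer arithmetic.
    return (n + perPage - 1) // perPage

print(pageCount(95))   # 10
print(pageCount(100))  # 10, whereas 100 // 10 + 1 would give 11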

# 11. Fetch the details of every news item on every list page.
pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
n = getPageN()
getListPage(pageUrl)  # the first page is the plain index URL
for i in range(2, n + 1):
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(pageUrl)
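Crawling every list page in one burst can hammer the server; an optional variant of the loop above pauses between requests (the one-second delay is an arbitrary choice):

import time

for i in range(2, n + 1):
    getListPage('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i))
    time.sleep(1)  # pause between pages to stay polite to the server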

  


Original article: https://www.cnblogs.com/2647409627qq/p/8781626.html
