
Crawling All Campus News


  1. Fetch a single news item's title, link, time, source, content, and hit count, and wrap it all in one function.

    import requests
    from bs4 import BeautifulSoup
    import re

    res = requests.get("http://news.gzcc.cn/html/xiaoyuanxinwen/")
    res.encoding = 'utf-8'

    soup = BeautifulSoup(res.text, 'html.parser')
    li = soup.select('li')

    def gethits(url_1):
        # The article id is the file name at the end of the detail-page URL.
        li_id = re.search('_.*/(.*).html', url_1).group(1)
        # The click-count API returns a JS snippet; strip it down to the bare number.
        hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
        return hits

    def getpageinfo(label):
        for title_list in label:
            if len(title_list.select('.news-list-title')) > 0:
                href = title_list.select('a')[0]['href']
                title = title_list.select('.news-list-title')[0].text
                time = title_list.select('span')[0].text
                info = title_list.select('span')[1].text

                res_list = requests.get(href)
                res_list.encoding = 'utf-8'
                soup_list = BeautifulSoup(res_list.text, 'html.parser')
                text_list = soup_list.select('.show-content')[0].text

                hits_list = gethits(href)

                print('Time:', time,
                      '\nTitle:', title,
                      '\nLink:', href,
                      '\nSource:', info,
                      '\nHits:', hits_list, '\n')

                print('Content:', text_list)
                break  # only the first news item is needed for this task

    getpageinfo(li)
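The rstrip/lstrip chain in gethits depends on the exact characters around the number and silently returns junk if the API's output shifts. A minimal regex-based sketch of the same extraction; it assumes the response embeds the count as html('1234'); (the same assumption the strip calls make), and gethits_re is a hypothetical name:

    import re
    import requests

    def gethits_re(url_1):
        # Same id extraction as gethits above.
        li_id = re.search('_.*/(.*).html', url_1).group(1)
        api = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)
        text = requests.get(api).text
        # Assumed response format: ...html('1234'); pull the digits in one step.
        m = re.search(r"html\('(\d+)'\)", text)
        return int(m.group(1)) if m else 0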


  2. Fetch the above details for every news item on one list page, and wrap that in a function.

    import requests
    from bs4 import BeautifulSoup
    import re

    url_main = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
    res = requests.get(url_main)
    res.encoding = 'utf-8'

    soup = BeautifulSoup(res.text, 'html.parser')
    li = soup.select('li')

    def gethits(url_1):
        # The article id is the file name at the end of the detail-page URL.
        li_id = re.search('_.*/(.*).html', url_1).group(1)
        # The click-count API returns a JS snippet; strip it down to the bare number.
        hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
        return hits

    def getpageinfo(label):
        for title_list in label:
            if len(title_list.select('.news-list-title')) > 0:
                href = title_list.select('a')[0]['href']
                title = title_list.select('.news-list-title')[0].text
                time = title_list.select('span')[0].text
                info = title_list.select('span')[1].text

                res_list = requests.get(href)
                res_list.encoding = 'utf-8'
                soup_list = BeautifulSoup(res_list.text, 'html.parser')
                text_list = soup_list.select('.show-content')[0].text

                hits_list = gethits(href)

                print('Time:', time,
                      '\nTitle:', title,
                      '\nLink:', href,
                      '\nSource:', info,
                      '\nHits:', hits_list, '\n')

                print('Content:', text_list)

    getpageinfo(li)
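Printing inside getpageinfo makes the results hard to reuse later. A sketch that walks the same selectors but collects each item into a dict instead (getpageinfo_dict is a hypothetical helper, not part of the original):

    def getpageinfo_dict(label):
        # Collect each news item into a dict instead of printing it.
        news_list = []
        for title_list in label:
            if len(title_list.select('.news-list-title')) > 0:
                news_list.append({
                    'title': title_list.select('.news-list-title')[0].text,
                    'href': title_list.select('a')[0]['href'],
                    'time': title_list.select('span')[0].text,
                    'source': title_list.select('span')[1].text,
                })
        return news_list

The returned list can then be printed, filtered, or written to a file without re-fetching anything.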


  3. Get the URLs of all the news list pages and call the functions above.

import requests
from bs4 import BeautifulSoup
import re

url_main = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
res = requests.get(url_main)
res.encoding = 'utf-8'

soup = BeautifulSoup(res.text, 'html.parser')
li = soup.select('li')

def gethits(url_1):
    li_id = re.search('_.*/(.*).html', url_1).group(1)
    hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
    return hits

def getpageinfo(label):
    for title_list in label:
        if len(title_list.select('.news-list-title')) > 0:
            href = title_list.select('a')[0]['href']
            title = title_list.select('.news-list-title')[0].text
            time = title_list.select('span')[0].text
            info = title_list.select('span')[1].text

            res_list = requests.get(href)
            res_list.encoding = 'utf-8'
            soup_list = BeautifulSoup(res_list.text, 'html.parser')
            text_list = soup_list.select('.show-content')[0].text

            hits_list = gethits(href)

            print('Time:', time, '\nTitle:', title, '\nLink:', href,
                  '\nSource:', info, '\nHits:', hits_list, '\n')
            print('Content:', text_list)

getpageinfo(li)

# '.a1' holds the total item count; each list page shows 10 items.
pages = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1

for i in range(2, pages + 1):
    url_page = "http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html".format(i)

    res_page = requests.get(url_page)
    res_page.encoding = 'utf-8'

    soup_page = BeautifulSoup(res_page.text, 'html.parser')
    list_page = soup_page.select('li')  # parse the current page, not the front page
    getpageinfo(list_page)
    print(url_page)
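To make the pages arithmetic concrete: each list page holds 10 items, so the total from the '.a1' element is divided by 10 and rounded up. With a made-up total of 238:

    total = int('238条'.rstrip('条'))  # hypothetical '.a1' text, giving 238
    pages = total // 10 + 1           # 23 full pages plus one partial page
    print(pages)                      # 24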


  4. Complete the crawl of all campus news.

    import requests
    from bs4 import BeautifulSoup
    import re

    url_main = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
    res = requests.get(url_main)
    res.encoding = 'utf-8'

    soup = BeautifulSoup(res.text, 'html.parser')
    li = soup.select('li')

    def gethits(url_1):
        li_id = re.search('_.*/(.*).html', url_1).group(1)
        hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
        return hits

    def getpageinfo(label):
        for title_list in label:
            if len(title_list.select('.news-list-title')) > 0:
                href = title_list.select('a')[0]['href']
                title = title_list.select('.news-list-title')[0].text
                time = title_list.select('span')[0].text
                info = title_list.select('span')[1].text

                res_list = requests.get(href)
                res_list.encoding = 'utf-8'
                soup_list = BeautifulSoup(res_list.text, 'html.parser')
                text_list = soup_list.select('.show-content')[0].text

                hits_list = gethits(href)

                print('Time:', time,
                      '\nTitle:', title,
                      '\nLink:', href,
                      '\nSource:', info,
                      '\nHits:', hits_list, '\n')

                print('Content:', text_list)

    getpageinfo(li)

    # '.a1' holds the total item count; each list page shows 10 items.
    pages = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1

    for i in range(2, pages + 1):
        url_page = "http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html".format(i)

        res_page = requests.get(url_page)
        res_page.encoding = 'utf-8'

        soup_page = BeautifulSoup(res_page.text, 'html.parser')
        list_page = soup_page.select('li')  # parse the current page, not the front page
        getpageinfo(list_page)
        print(url_page)
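A full crawl fires hundreds of requests back to back. A small wrapper with a pause and a timeout keeps the crawl polite and lets it survive a single bad page; get_soup and the half-second delay are my own choices, not part of the original:

    import time
    import requests
    from bs4 import BeautifulSoup

    def get_soup(url, delay=0.5):
        # Pause before each request and fail soft instead of crashing the whole crawl.
        time.sleep(delay)
        try:
            res = requests.get(url, timeout=10)
            res.encoding = 'utf-8'
            return BeautifulSoup(res.text, 'html.parser')
        except requests.RequestException:
            return None

Each requests.get / BeautifulSoup pair above could then become a single get_soup(url_page) call, skipping the page whenever it returns None.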


  5. Crawl the corresponding data for a topic of your own choice.

import requests
from bs4 import BeautifulSoup

url_main = "http://www.gamersky.com/"
res = requests.get(url_main)
res.encoding = 'utf-8'

soup = BeautifulSoup(res.text, 'html.parser')

# Print the title and link of every news entry on the front page.
for news in soup.select('li'):
    if len(news.select('a')) > 0:
        title = news.select('a')[0].text
        url = news.select('a')[0]['href']
        # time = news.select('span')[0].contents[0].text
        # print(time, title, url)
        print(title, url)

# 'pages' was undefined in the original; use a fixed page count as a placeholder.
pages = 5
for i in range(2, pages + 1):
    url_page = "http://www.gamersky.com/{}.html".format(i)

    res_page = requests.get(url_page)
    res_page.encoding = 'utf-8'

    # The campus getpageinfo is not defined in this script, so list the
    # items of the current page inline instead.
    soup_page = BeautifulSoup(res_page.text, 'html.parser')
    for news in soup_page.select('li'):
        if len(news.select('a')) > 0:
            print(news.select('a')[0].text, news.select('a')[0]['href'])
    print(url_page)
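Console output disappears when the window closes. A sketch of saving the scraped (title, url) pairs with the csv module instead; 'gamersky_news.csv' is an arbitrary file name, and soup is the front-page soup from the snippet above:

    import csv

    rows = []
    for news in soup.select('li'):
        if len(news.select('a')) > 0:
            rows.append((news.select('a')[0].text, news.select('a')[0]['href']))

    with open('gamersky_news.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['title', 'url'])  # header row
        writer.writerows(rows)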
    



Original post: http://www.cnblogs.com/zsc-leo/p/7655335.html
