Last semester I took part in a big-data competition that required crawling a large amount of data, so I went after Sina Weibo. I had originally planned to use Sina's API, but unfortunately Sina doesn't expose an API for keyword search, which left scraping as the only option. Fortunately, Sina offers an advanced search feature, and that gives us a good entry point for crawling.
After reading up on the topic and studying a few crawler examples, the overall approach emerged: construct the URL, fetch the page, then parse the page.
The details follow~
Log in to Sina Weibo and open the advanced search. Enter a query as shown in the figure; after submitting it, the address bar changes to something like: http://s.weibo.com/weibo/%25E4%25B8%25AD%25E5%25B1%25B1%25E5%25A4%25A7%25E5%25AD%25A6&region=custom:44:1&typeall=1&suball=1&timescope=custom:2015-08-07-0:2015-08-08-0&Refer=g
Breaking it down:
Fixed prefix: http://s.weibo.com/weibo/
Keyword, URL-encoded twice as UTF-8: %25E4%25B8%25AD%25E5%25B1%25B1%25E5%25A4%25A7%25E5%25AD%25A6
Search region: region=custom:44:1
Search time range: timescope=custom:2015-08-07-0:2015-08-08-0
Ignorable parameter: Refer=g
Page number of the request: page=1 (can be omitted for the first page). A sketch of assembling such a URL follows this list.
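To make the double encoding concrete, here is a minimal sketch of building such a search URL; the keyword and time range are examples, and urllib.quote is used here in place of the urlencode trick the full script below relies on:

# -*- coding: utf-8 -*-
import urllib

keyword = u'中山大学'.encode('utf-8')  # example keyword as UTF-8 bytes
once = urllib.quote(keyword)           # first pass:  %E4%B8%AD...
twice = urllib.quote(once)             # second pass: %25E4%25B8... (% becomes %25)

url = ('http://s.weibo.com/weibo/' + twice +
       '&typeall=1&suball=1'
       '&timescope=custom:2015-08-07-0:2015-08-08-0'  # example time range
       '&page=1')
print url

Encoding once gives the familiar %E4%B8%AD... form; encoding that result again turns every % into %25, which is exactly the form seen in the address bar above.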
Now let's open the page source and see what on earth is in there:
Seeing it for the first time, you'll probably gasp; it really is dizzying to look at.
Don't worry, let me walk you through it.
First, locate the spot shown in the figure, i.e. the line containing the string <script>STK && STK.pageletM && STK.pageletM.view({"pid":"pl_weibo_direct" — this is where the markup of the search-result page lives~
The page content is Unicode-escaped, so the Chinese characters don't display normally, and there is no formatting at all, which is why it looks so chaotic.
We can first clean up the fetched page; this is where lxml's etree comes in, which parses the page content into a tree of nodes.
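As a minimal sketch (using a tiny stand-in string; the full script below recovers the real fragment from the pagelet line first), building the tree is a single call:

from lxml import etree

# a tiny stand-in for the HTML fragment recovered from the pagelet line
fragment = '<div><p node-type="feed_list_content" nick-name="demo">hello</p></div>'
page = etree.HTML(fragment.decode('utf-8'))  # parse the fragment into an element tree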
Let's pull one of those nodes out and have a look:
<a class=\"W_texta W_fb\" nick-name=\"\u554a\u5be7\u5504\" href=\"http:\/\/weibo.com\/612364698\" target=\"_blank\" title=\"\u554a\u5be7\u5504\" usercard=\"id=1884932730&usercardkey=weibo_mp\"\t\tsuda-data=\"key=tblog_search_weibo&value=weibo_ss_1_name\" class=\"name_txt W_fb\">
From this node we can read some information about the poster, such as the nick-name and the profile URL in href.
Now let's look at another node:
<p class=\"comment_txt\" node-type=\"feed_list_content\" nick-name=\"\u554a\u5be7\u5504\">\u8fd9\u4e48\u52aa\u529b \u5c45\u7136\u5012\u6570\u7b2c\u4e94 \u5509 \u4e0d\u884c\u6211\u8981\u8ffd\u56de\u6765 \u8d8a\u632b\u8d8a\u52c7 \u4e0d\u53ef\u4ee5\u81ea\u66b4\u81ea\u5f03 \u4e0d\u53ef\u4ee5\u8ba9\u8d1f\u9762\u60c5\u7eea\u8dd1\u51fa\u6765 \u83dc\u575a\u5f3a \u52a0\u6cb9\u52a0\u6cb9\u52a0\u6cb9 \u6211\u8981\u4e0a<em class=\"red\">\u4e2d\u5c71\u5927\u5b66<\/em> \u6211\u8981\u548c\u5c0f\u54c8\u5427\u4e00\u6240\u5927\u5b66 \u62fc\u4e86<\/p>
The data in this node is the text of the weibo itself.
That makes things much clearer. As for how to find the relevant nodes and read their attributes and contents, the tool we use is XPath.
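Continuing the sketch above (page is the tree we just built), these are the two XPath queries the script below uses; the attribute values come straight from the node snippets shown:

ps = page.xpath("//p[@node-type='feed_list_content']")  # weibo content nodes
addrs = page.xpath("//a[@class='W_texta W_fb']")        # poster link nodes

for p, a in zip(ps, addrs):
    print p.attrib.get('nick-name')  # poster's nickname
    print p.xpath('string(.)')       # full text of the weibo
    print a.attrib.get('href')       # poster's homepage URL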
For an introduction to XPath, see http://blog.csdn.net/raptor/article/details/4516441
Once the data is extracted, it needs saving; I export it to Excel, using the xlwt and xlrd modules (plus xlutils to append to an existing workbook).
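One caveat worth knowing: the script below assumes weiboData.xls already exists, with a running row count stored in cell (0,0). A minimal sketch to create that seed file once (the sheet name is my own choice, not part of the original code):

import xlwt

# one-off seed file: cell (0,0) holds the index of the next free row
wb = xlwt.Workbook()
ws = wb.add_sheet('weibo')
ws.write(0, 0, '1')
wb.save('weiboData.xls')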
Here is what the final data looks like (the information I collected is actually more detailed and requires visiting each poster's profile page; to keep things easy to read and follow, that part has been cut from the code below):
The code:
# -*- coding: utf-8 -*-
import sys
import urllib
import urllib2
import time
from datetime import datetime
from datetime import timedelta
import random
from lxml import etree
import logging
import xlwt
import xlrd
from xlutils.copy import copy


class CollectData():
    """Collect weibo posts from Sina's keyword search, one day at a time."""

    def __init__(self, keyword, startTime, interval='50', flag=True, begin_url_per="http://s.weibo.com/weibo/"):
        self.begin_url_per = begin_url_per   # fixed URL prefix
        self.setKeyword(keyword)             # search keyword
        self.setStartTimescope(startTime)    # start date of the search window
        self.setInterval(interval)           # base sleep interval between pages (anti-ban)
        self.setFlag(flag)
        self.logger = logging.getLogger('main.CollectData')

    def setKeyword(self, keyword):
        # raw_input on a Chinese Windows console yields GBK bytes; convert to UTF-8
        self.keyword = keyword.decode('GBK', 'ignore').encode('utf-8')
        print 'twice encode:', self.getKeyWord()

    def getKeyWord(self):
        # URL-encode the UTF-8 keyword twice, as s.weibo.com expects;
        # [3:] strips the leading "kw=" that urlencode adds
        once = urllib.urlencode({"kw": self.keyword})[3:]
        return urllib.urlencode({"kw": once})[3:]

    def setStartTimescope(self, startTime):
        # a timescope of the form YYYY-mm-dd:YYYY-mm-dd covers a single day
        if not (startTime == '-'):
            self.timescope = startTime + ":" + startTime
        else:
            self.timescope = '-'

    def setInterval(self, interval):
        self.interval = int(interval)

    def setFlag(self, flag):
        self.flag = flag

    def getURL(self):
        return self.begin_url_per + self.getKeyWord() + "&typeall=1&suball=1&timescope=custom:" + self.timescope + "&page="

    def download(self, url, maxTryNum=4):
        hasMore = True         # are there more result pages?
        isCaught = False       # blocked by the anti-crawler mechanism?
        name_filter = set([])  # de-duplicate posters within this run

        i = 1                  # search results are capped at 50 pages
        while hasMore and i < 51 and (not isCaught):
            source_url = url + str(i)
            data = ''
            goon = True        # carry on after the retry loop?

            # fetch the page, retrying up to maxTryNum times
            for tryNum in range(maxTryNum):
                try:
                    html = urllib2.urlopen(source_url, timeout=12)
                    data = html.read()
                    break
                except:
                    if tryNum < (maxTryNum - 1):
                        time.sleep(10)
                    else:
                        print 'Internet Connect Error!'
                        self.logger.error('Internet Connect Error!')
                        self.logger.info('url: ' + source_url)
                        self.logger.info('page: ' + str(i))
                        self.flag = False
                        goon = False
                        break
            if goon:
                lines = data.splitlines()
                isCaught = True
                for line in lines:
                    # the search results live in the pl_weibo_direct pagelet
                    if line.startswith('<script>STK && STK.pageletM && STK.pageletM.view({"pid":"pl_weibo_direct"'):
                        isCaught = False
                        n = line.find('html":"')
                        if n > 0:
                            # strip the JSON wrapper and unescape the \uXXXX sequences
                            j = line[n + 7: -12].encode("utf-8").decode('unicode_escape').encode("utf-8").replace("\\", "")
                            if j.find('<div class="search_noresult">') > 0:
                                hasMore = False
                            else:
                                page = etree.HTML(j.decode('utf-8'))
                                ps = page.xpath("//p[@node-type='feed_list_content']")  # weibo texts
                                addrs = page.xpath("//a[@class='W_texta W_fb']")        # poster links
                                addri = 0
                                # append every post by a not-yet-seen poster to the workbook
                                for p in ps:
                                    name = p.attrib.get('nick-name')
                                    txt = p.xpath('string(.)')
                                    addr = addrs[addri].attrib.get('href')
                                    addri += 1
                                    if name and txt and name not in name_filter:
                                        name_filter.add(name)
                                        # cell (0,0) of weiboData.xls keeps the running row count
                                        oldWb = xlrd.open_workbook('weiboData.xls', formatting_info=True)
                                        oldWs = oldWb.sheet_by_index(0)
                                        rows = int(oldWs.cell(0, 0).value)
                                        newWb = copy(oldWb)
                                        newWs = newWb.get_sheet(0)
                                        newWs.write(rows, 0, str(rows))
                                        newWs.write(rows, 1, name)
                                        newWs.write(rows, 2, self.timescope)
                                        newWs.write(rows, 3, addr)
                                        newWs.write(rows, 4, txt)
                                        newWs.write(0, 0, str(rows + 1))
                                        newWb.save('weiboData.xls')
                                        print "save with same name ok"
                        break
                lines = None

                if isCaught:
                    print 'Be Caught!'
                    self.logger.error('Be Caught Error!')
                    self.logger.info('url: ' + source_url)
                    self.logger.info('page: ' + str(i))
                    data = None
                    self.flag = False
                    break

                if not hasMore:
                    print 'No More Results!'
                    if i == 1:
                        time.sleep(random.randint(3, 8))
                    else:
                        time.sleep(10)
                    data = None
                    break
                i += 1
                # randomized sleep between pages so the requests look less mechanical
                sleeptime_one = random.randint(self.interval - 25, self.interval - 15)
                sleeptime_two = random.randint(self.interval - 15, self.interval)
                if i % 2 == 0:
                    sleeptime = sleeptime_two
                else:
                    sleeptime = sleeptime_one
                print 'sleeping ' + str(sleeptime) + ' seconds...'
                time.sleep(sleeptime)
            else:
                break

    def getTimescope(self, perTimescope):
        # advance the one-day window: "2015-08-07:2015-08-07" -> "2015-08-08:2015-08-08"
        if not (perTimescope == '-'):
            times_list = perTimescope.split(':')
            start_date = datetime(int(times_list[-1][0:4]), int(times_list[-1][5:7]), int(times_list[-1][8:10]))
            start_new_date = start_date + timedelta(days=1)
            start_str = start_new_date.strftime("%Y-%m-%d")
            return start_str + ":" + start_str
        else:
            return '-'


def main():
    logger = logging.getLogger('main')
    logFile = './collect.log'
    logger.setLevel(logging.DEBUG)
    filehandler = logging.FileHandler(logFile)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s')
    filehandler.setFormatter(formatter)
    logger.addHandler(filehandler)

    while True:
        keyword = raw_input('Enter the keyword(type \'quit\' to exit ):')
        if keyword == 'quit':
            sys.exit()
        startTime = raw_input('Enter the start time(Format:YYYY-mm-dd):')
        interval = raw_input('Enter the time interval( >30 and default:50):')
        if interval == '':
            interval = '50'  # fall back to the advertised default

        cd = CollectData(keyword, startTime, interval)
        while cd.flag:
            print cd.timescope
            logger.info(cd.timescope)
            url = cd.getURL()
            cd.download(url)
            cd.timescope = cd.getTimescope(cd.timescope)  # move on to the next day
        else:
            cd = None
            print '-----------------------------------------------------'
            print '-----------------------------------------------------'


if __name__ == '__main__':
    main()
The above takes care of the data crawling; combine it with the simulated login from the previous post and you can happily scrape away~
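The login code itself isn't repeated here, but as a rough sketch (standard library only; an illustration rather than the previous post's exact method), installing a cookie-aware opener before crawling makes every urllib2.urlopen() call in CollectData carry the logged-in session:

import urllib2
import cookielib

# assumption: cookies get populated by a login flow like the previous post's
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)  # later urllib2.urlopen() calls reuse these cookies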