
Downloading Tianya Forum Posts with a Multi-threaded Queue



Version 1:


Version 2: a fixed pool of worker threads consumes page URLs from a Queue.Queue, and the main thread blocks on queue.join() until every page has been downloaded.

#coding:utf-8
import Queue
import threading
import time
import urllib2
import re
import sys

queue = Queue.Queue()
url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
Dict_txt = {}
file_name = 'abc.txt'

class ThreadNum(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # Consumer side: take one page URL off the queue
            num = self.queue.get()
            sys.stdout.flush()
            down_text(Dict_txt, num)
            print '**finished download %s**' % num
            time.sleep(1)
            # Signal the queue that this task is complete
            self.queue.task_done()

def down_text(txt_dict, url):
    """Fetch one page and store its posts in the dict, keyed by URL."""
    print '--downloading %s--' % url
    html_content = urllib2.urlopen(url, timeout=30).read()
    text_pattern = re.compile('<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
    text = text_pattern.findall(html_content)
    text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
    txt_dict[url] = text_join

def page(url):
    """Read the total page count from the first page and build the list of page URLs."""
    html_page = urllib2.urlopen(url, timeout=30).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    list1 = []
    if page_result:
        page_num = int(page_result.group(1))
        for num in range(1, page_num + 1):
            myurl = '%s%s.shtml' % (url[:-7], num)
            list1.append(myurl)
    return list1

def write_text(filename, dict, urls):
    """Write the dict contents to the file in page order; each value is the list of posts on that page."""
    tx_file = open(filename, 'w+')
    for i in urls:
        tx_list = dict[i]
        for tx in tx_list:
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace(' ', '')
            tx_file.write(tx.strip() + '\r\n' * 4)
    tx_file.close()

start = time.time()

def main():
    # Build a pool of 5 worker threads that consume URLs from the queue
    urls = page(url)
    for i in range(5):
        t = ThreadNum(queue)
        t.setDaemon(True)  # daemonize so the endlessly looping workers don't block exit
        t.start()
    # Put the page URLs into the queue
    for num in urls:
        queue.put(num)
    # Wait on the queue until everything has been processed
    queue.join()
    print '---- start write ----'
    write_text(file_name, Dict_txt, urls)

main()
print "Elapsed Time: %s" % (time.time() - start)
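
The moving part in version 2 is the Queue.Queue handshake: daemon workers block on get(), and queue.join() in the main thread returns only once every put() item has been answered by a task_done() call. Here is a minimal sketch of just that mechanism (the worker body is a stand-in print, not the Tianya downloader):

    #coding:utf-8
    import Queue
    import threading

    q = Queue.Queue()

    def worker():
        while True:
            item = q.get()    # blocks until an item is available
            print 'processing %s' % item
            q.task_done()     # exactly one task_done() per get(), or join() hangs forever

    for i in range(3):
        t = threading.Thread(target=worker)
        t.setDaemon(True)     # daemon threads die with the main thread
        t.start()

    for item in ['page-1', 'page-2', 'page-3', 'page-4']:
        q.put(item)

    q.join()                  # returns once every queued item has been processed
    print 'all items processed'

Marking the workers as daemons matters here: they loop forever on get(), so without setDaemon(True) the process would never exit after join() returns.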


Version 3: one thread per page, no queue; the main thread joins all downloader threads, then writes the pages out in order.
#coding:utf-8
import urllib
import re
import threading
import os, time

class Down_Tianya(threading.Thread):
    """One downloader thread per page."""
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Fetch this thread's page and store its posts in the dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        text_pattern = re.compile('<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join

def page(url):
    """Read the total page count from the first page."""
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num

def write_text(dict, fn):
    """Write the dict contents to the file in page order; each value is the list of posts on that page."""
    tx_file = open(fn, 'w+')
    pn = len(dict)
    for i in range(1, pn + 1):
        tx_list = dict[i]
        for tx in tx_list:
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace(' ', '')
            tx_file.write(tx.strip() + '\r\n' * 4)
    tx_file.close()

def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}
    print 'page num is : %s' % my_page
    threads = []
    # Build one URL per page and start a downloader thread for each
    for num in range(1, my_page + 1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)
    # Wait for every download to finish before writing
    for t in threads:
        t.join()
    write_text(my_dict, file_name)
    print 'All download finished. Save file at directory: %s' % os.getcwd()

if __name__ == '__main__':
    main()
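
Version 3 drops the queue entirely: each page gets its own thread, results land in a shared dict keyed by page number, and the ordered read-back at write time hides the fact that threads finish in arbitrary order. A minimal sketch of that pattern, with a hypothetical fetch() standing in for the real download:

    #coding:utf-8
    import threading
    import random
    import time

    results = {}  # shared dict; each thread writes a distinct key, which is safe in CPython

    def fetch(num):
        time.sleep(random.random())      # simulate downloads finishing in arbitrary order
        results[num] = 'content of page %d' % num

    threads = [threading.Thread(target=fetch, args=(n,)) for n in range(1, 6)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()                         # wait for every download to finish

    for n in range(1, len(results) + 1): # read back in page order
        print results[n]

One thread per page is fine for a post with a few dozen pages, but for very long threads version 2's fixed-size pool keeps the number of simultaneous connections bounded.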






Original source: http://www.cnblogs.com/highroom/p/da69c3af3d0b2be8b4db5946e2de99f1.html
