码迷,mamicode.com
首页 > 编程语言 > 详细

Python爬虫之queue线程安全实战

时间:2018-10-25 14:23:00      阅读:192      评论:0      收藏:0      [点我收藏+]

标签:pen   批量下载   lib   线程   and   photo   rod   nbsp   none   

1.普通下载

技术分享图片
import requests
import os
import re
from lxml import etree
from urllib import request


def get_detail(url):
    """Fetch one doutula list page and download every non-gif image on it.

    url: a list-page URL such as http://www.doutula.com/photo/list/?page=1

    Fix: the scraped source had lost the quotes around the XPath string
    and the regex raw string, which were syntax errors.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3554.0 Safari/537.36"
    }
    rep = requests.get(url, headers=headers)
    html = etree.HTML(rep.text)
    # Select every image in the content area except the animated-gif previews.
    imgs = html.xpath('//div[@class="page-content text-center"]//img[@class!="gif"]')
    for img in imgs:
        # The real image URL is lazy-loaded via the data-original attribute.
        img_url = img.get("data-original")
        # The alt text doubles as the local file name.
        img_name = img.get("alt")
        # Strip characters that are awkward/illegal in file names.
        img_name = re.sub(r"[\??\.,。!!]", "", img_name)
        # Keep the real extension; drop any "!..." size suffix after it.
        suffix = os.path.splitext(img_url)[1].split("!")[0]
        filename = img_name + suffix
        # Download straight into the local imgs/ directory.
        request.urlretrieve(img_url, "imgs/" + filename)


def main():
    """Crawl list pages 1-100, downloading the images from each page."""
    template = "http://www.doutula.com/photo/list/?page={}"
    for page in range(1, 101):
        get_detail(template.format(page))


# Fix: the scraped code had lost the quotes around "__main__".
if __name__ == "__main__":
    main()
View Code

2.开启queue多线程安全队列异步下载

技术分享图片
import os
import re
import threading
from queue import Empty, Queue
from urllib import request

import requests
from lxml import etree


class Producer(threading.Thread):
    """Scraper thread: turns list-page URLs into image download jobs.

    Pulls page URLs from ``page_queue``, parses each page, and pushes
    ``(img_url, filename)`` tuples onto ``img_queue`` for the Consumer
    threads to download.

    Fix: the scraped source had lost the quotes around the XPath string
    and the regex raw string in ``get_detail``, which were syntax errors.
    """

    # Class-level, shared by all Producer instances: present as a browser.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3554.0 Safari/537.36"
    }

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        # Initialise the underlying Thread, then keep references to the
        # two queues shared with the Consumer threads.
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        # Keep consuming page URLs until the page queue is drained.
        while True:
            if self.page_queue.empty():
                break
            url = self.page_queue.get()
            self.get_detail(url)

    def get_detail(self, url):
        """Parse one list page and enqueue every non-gif image found."""
        rep = requests.get(url, headers=self.headers)
        html = etree.HTML(rep.text)
        # Everything in the content area except animated-gif previews.
        imgs = html.xpath('//div[@class="page-content text-center"]//img[@class!="gif"]')
        for img in imgs:
            # The real image URL is lazy-loaded via data-original.
            img_url = img.get("data-original")
            # The alt text doubles as the local file name.
            img_name = img.get("alt")
            # Strip characters that are awkward/illegal in file names.
            img_name = re.sub(r"[\??\.,。!!\*]", "", img_name)
            # Keep the real extension; drop any "!..." size suffix.
            suffix = os.path.splitext(img_url)[1].split("!")[0]
            filename = img_name + suffix
            # Hand the download job off to the consumer threads.
            self.img_queue.put((img_url, filename))


class Consumer(threading.Thread):
    """Downloader thread: saves queued images into the local imgs/ dir."""

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        # Initialise the underlying Thread, then keep references to the
        # two queues shared with the Producer threads.
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            # Both queues drained -> all work is done, exit the thread.
            if self.img_queue.empty() and self.page_queue.empty():
                break
            # Fix: the original blocking get() could hang forever if the
            # producers finished (or died) between the empty() check and
            # the get(). Poll with a timeout and re-check the exit
            # condition instead.
            try:
                img_url, filename = self.img_queue.get(timeout=1)
            except Empty:
                continue
            request.urlretrieve(img_url, "imgs/" + filename)
            print(filename + "下载完成!")


def main():
    """Fill the page queue with list-page URLs, then launch the workers."""
    page_queue = Queue(100)    # capacity: one slot per list page
    img_queue = Queue(1000)    # buffer of pending image-download jobs

    for page in range(1, 101):
        page_queue.put("http://www.doutula.com/photo/list/?page={}".format(page))

    # Five scraper threads feeding the image queue...
    for _ in range(5):
        Producer(page_queue, img_queue).start()

    # ...and five downloader threads draining it.
    for _ in range(5):
        Consumer(page_queue, img_queue).start()


# Fix: the scraped code had lost the quotes around "__main__".
if __name__ == "__main__":
    main()
View Code

 

Python爬虫之queue线程安全实战

标签:pen   批量下载   lib   线程   and   photo   rod   nbsp   none   

原文地址:https://www.cnblogs.com/Guishuzhe/p/9848961.html

(0)
(0)
   
举报
评论 一句话评论(0)
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!