# Tags: program, executor, obj, func, callback function, start, tps, future, fixed
import time
from threading import current_thread
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
def func(i):
    """Print a (task-index, worker-thread-id) pair for task *i*."""
    # current_thread() returns the Thread object running this task;
    # its .ident attribute is that thread's id.
    thread_id = current_thread().ident
    print((i, thread_id))
if __name__ == '__main__':
    # submit() is asynchronous and non-blocking, so the elapsed time below
    # is printed before the worker threads have necessarily finished — that
    # is exactly what this demo shows.
    t0 = time.time()
    pool = ThreadPoolExecutor(4)
    for task_no in range(10):
        pool.submit(func, task_no)
    t1 = time.time()
    print('总时间: %s' % (t1 - t0))
'''结果是:
(0, 140627431798528)
(1, 140627431798528)
(2, 140627431798528)
(3, 140627406620416)
总时间: 0.0022306442260742188
(5, 140627431798528)
(6, 140627415013120)
(7, 140627423405824)
(4, 140627406620416)
(9, 140627415013120)
(8, 140627431798528)
'''
import time
from threading import current_thread
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
def func(i):
    """Return a (task-index, worker-thread-id) pair."""
    # current_thread().ident gives the id of the thread executing this task.
    return i, current_thread().ident
if __name__ == '__main__':
    begin = time.time()
    executor = ThreadPoolExecutor(4)
    for n in range(10):
        fut = executor.submit(func, n)
        # result() blocks until this particular task finishes, so the loop
        # is effectively serialized — that is why the total time grows
        # compared with the fire-and-forget version.
        print(fut.result())
    print('总时间: %s' % (time.time() - begin))
'''
结果是:
(0, 140523361343232)
(1, 140523352950528)
(2, 140523276859136)
(3, 140523268466432)
(4, 140523276859136)
(5, 140523352950528)
(6, 140523268466432)
(7, 140523361343232)
(8, 140523276859136)
(9, 140523352950528)
总时间: 0.00455474853515625
'''
import time
from threading import current_thread
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
def func(i):
    """Identify which worker thread handled task *i*."""
    # The id of the thread currently running this function.
    ident = current_thread().ident
    return (i, ident)
if __name__ == '__main__':
    r_list = []
    starttime = time.time()
    tp = ThreadPoolExecutor(4)
    # Collect the futures first so all tasks run concurrently, then read
    # the results afterwards.
    for i in range(10):
        ret = tp.submit(func, i)
        r_list.append(ret)
    # A plain for-loop, not a list comprehension: printing is a side effect
    # and the original comprehension built a throwaway list of None values.
    for ret in r_list:
        print(ret.result())
    endtime = time.time()
    # Report the total elapsed time, consistent with the other examples
    # (the original computed endtime but never used it).
    print('总时间: %s' % (endtime - starttime))
# tp.map(function_to_call, args): args must be an iterable, and only simple arguments can be passed this way
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time
import requests
import os
# Directory containing this script. NOTE(review): dirname(__file__) may be ''
# when the script is run from its own directory, and it carries no trailing
# separator — confirm before concatenating file names onto it.
base_dir = os.path.dirname(__file__)
def producer(url_all):
    """Download one page; *url_all* is a (name, url) pair, returns (name, html)."""
    name, url = url_all
    response = requests.get(url)
    page_text = response.text
    return name, page_text
def consumer(res):
    """Write one downloaded page to disk; *res* is a (name, content) pair."""
    name, content = res
    print(name)
    # os.path.join instead of string concatenation: base_dir has no trailing
    # separator (and may be empty), so 'base_dir + name' built a wrong path.
    # utf-8 is stated explicitly because the downloaded pages are Chinese.
    with open(os.path.join(base_dir, name + '.html'), 'w', encoding='utf-8') as f:
        f.write(content)
    time.sleep(0.1)
if __name__ == '__main__':
    tp = ThreadPoolExecutor(4)
    url_list = [
        ('百度', 'https://www.baidu.com'),
        ('头条', 'https://www.toutiao.com')
    ]
    # map() takes the iterable directly — wrapping it in a list comprehension
    # was a no-op copy. It returns a lazy generator yielding results in order.
    results = tp.map(producer, url_list)
    # Iterate the generator instead of calling next() by hand with a
    # range(len(...)) counter.
    for res in results:
        tp.submit(consumer, res)
    # Wait for the consumer tasks to finish and release the worker threads;
    # the original never shut the pool down.
    tp.shutdown()
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time
def func1(i):
    """Return *i* incremented by one."""
    return i + 1
def func2(i):
    """Done-callback: *i* is a finished Future; print twice its result."""
    # A done-callback receives the Future object itself, so result() is
    # needed to extract the task's return value.
    doubled = i.result() * 2
    print('最终结果是:%s' % doubled)
if __name__ == '__main__':
    tp = ThreadPoolExecutor(4)
    for n in range(10):
        # submit() is asynchronous and non-blocking; it hands back a Future.
        future = tp.submit(func1, n)
        # add_done_callback() runs func2 with the finished Future as its
        # single argument once func1 completes.
        future.add_done_callback(func2)
from concurrent.futures import ThreadPoolExecutor
import time
import requests
import os
import random
# Directory containing this script. NOTE(review): dirname(__file__) may be ''
# when run from the script's own directory, and it has no trailing separator —
# confirm before concatenating file names onto it.
base_dir = os.path.dirname(__file__)
def producer(url, name):
    """Download *url* and return a (name, html_text) pair."""
    # NOTE(review): 'starttime' is a module-level global overwritten by every
    # producer task; with 4 worker threads the elapsed time printed in
    # consumer() can belong to a different download — a shared-state race
    # that should be fixed by passing the start time along with the result.
    global starttime
    starttime = time.time()
    ret = requests.get(url)
    content = ret.text
    return name, content
def consumer(res):
    """Done-callback: *res* is a finished Future holding (name, content); save the page."""
    name, content = res.result()
    # os.path.join fixes the missing path separator in the original
    # 'base_dir + name' concatenation; utf-8 because the pages are Chinese.
    with open(os.path.join(base_dir, name + '1.html'), 'w', encoding='utf-8') as f:
        f.write(content)
    # NOTE(review): 'starttime' is a global shared by all producer threads,
    # so this elapsed figure may not match this particular download.
    print('爬取的网页长度为:%s,花费了%.3fs秒' % (len(content), time.time() - starttime))
    time.sleep(random.randrange(1,3))
if __name__ == "__main__":
    url_list = [
        ('百度', 'https://www.baidu.com'),
        ('头条', 'https://www.toutiao.com')
    ]
    # The with-statement shuts the pool down and waits for all downloads
    # (and their done-callbacks) to finish before the program exits; the
    # original never shut the pool down.
    with ThreadPoolExecutor(4) as tp:
        # Tuple unpacking instead of web[0]/web[1] indexing.
        for name, url in url_list:
            future = tp.submit(producer, url, name)
            future.add_done_callback(consumer)
# Tags: program, executor, obj, func, callback function, start, tps, future, fixed
# Original article: https://www.cnblogs.com/ddzc/p/12496967.html