标签:run 多次 活跃 mon UI 修改 add name .text
三个需要注意的点:
1.线程抢的是GIL锁,GIL锁相当于执行权限,拿到执行权限后才能拿到互斥锁Lock,其他线程也可以抢到GIL,但如果发现Lock仍然没有被释放则阻塞,即便是拿到执行权限GIL也要立刻交出来
2.join是等待所有,即整体串行,而锁只是锁住修改共享数据的部分,即部分串行,要想保证数据安全的根本原理在于让并发变成串行,join与互斥锁都可以实现,毫无疑问,互斥锁的部分串行效率要更高
线程1抢到GIL锁,拿到执行权限,开始执行,然后加了一把Lock,还没有执行完毕,即线程1还未释放Lock,有可能线程2抢到GIL锁,开始执行,执行过程中发现Lock还没有被线程1释放,于是线程2进入阻塞,被夺走执行权限,有可能线程1拿到GIL,然后正常执行到释放Lock。。。这就导致了串行运行的效果
from threading import Thread, Lock
import time

# Shared counter decremented once by each worker thread.
n = 100

# Module-level so work() is usable on import as well as under __main__
# (the original created it only inside the __main__ guard, so work()
# referenced an undefined global when imported).
mutex = Lock()


def work():
    """Decrement the global counter ``n`` by one, under ``mutex``.

    The read-modify-write of ``n`` is deliberately stretched with a
    sleep between the read and the write; without the lock, concurrent
    threads would read the same value and lose updates.  With the lock
    the 100 threads serialize and the final value is 0.
    """
    time.sleep(0.05)
    global n
    with mutex:  # exception-safe acquire/release
        temp = n
        time.sleep(0.1)  # widen the race window to prove the lock matters
        n = temp - 1


if __name__ == '__main__':
    threads = []
    start = time.time()
    for _ in range(100):
        t = Thread(target=work)
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    print('run time:%s value:%s' % (time.time() - start, n))
#多进程:
#优点:可以利用多核优势
#缺点:开销大
#多线程:
#优点:开销小
#缺点:不能利用多核优势
# from threading import Thread
# from multiprocessing import Process
# import time
# #计算密集型
# def work():
# res=1
# for i in range(100000000):
# res+=i
#
# if __name__ == ‘__main__‘:
# p_l=[]
# start=time.time()
# for i in range(4):
# # p=Process(target=work) #6.7473859786987305
# p=Thread(target=work) #24.466399431228638
# p_l.append(p)
# p.start()
# for p in p_l:
# p.join()
#
# print(time.time()-start)
from threading import Thread
from multiprocessing import Process
import time


# I/O-bound comparison: the worker only sleeps, so the GIL is released
# while waiting and threads overlap their waits almost perfectly, while
# processes pay start-up cost for no benefit.
def work():
    """Simulate an I/O-bound task by sleeping for two seconds."""
    time.sleep(2)


if __name__ == '__main__':
    workers = []
    start = time.time()
    for _ in range(400):
        # p = Process(target=work)  # ~12.10s: process creation dominates
        p = Thread(target=work)     # ~2.04s: all 400 sleeps overlap
        workers.append(p)
        p.start()
    for p in workers:
        p.join()
    print(time.time() - start)
死锁现象
from threading import Thread, Lock, RLock
import time

# Two independent locks that f1/f2 acquire in OPPOSITE orders -- the
# classic lock-ordering recipe for deadlock.  The deadlock is the point
# of this demo, so it is intentionally left in.
mutexA = Lock()
mutexB = Lock()


class Mythread(Thread):
    """Demonstrates a lock-ordering deadlock.

    ``f1`` takes A then B; ``f2`` takes B then A.  With 20 threads, one
    thread typically holds B inside ``f2`` while another holds A inside
    ``f1``, and each waits forever on the other's lock.
    """

    def run(self):
        self.f1()
        self.f2()

    def f1(self):
        # Lock order: A, then B.
        mutexA.acquire()
        print('\033[45m%s 抢到A锁\033[0m' % self.name)
        mutexB.acquire()
        print('\033[44m%s 抢到B锁\033[0m' % self.name)
        mutexB.release()
        mutexA.release()

    def f2(self):
        # Lock order: B, then A -- the reverse of f1, enabling deadlock.
        mutexB.acquire()
        print('\033[44m%s 抢到B锁\033[0m' % self.name)
        time.sleep(1)  # give other threads time to enter f1 and grab A
        mutexA.acquire()
        print('\033[45m%s 抢到A锁\033[0m' % self.name)
        mutexA.release()
        mutexB.release()


if __name__ == '__main__':
    for _ in range(20):
        t = Mythread()
        t.start()
递归锁,在Python中为了支持在同一线程中多次请求同一资源,python提供了可重入锁RLock。
这个RLock内部维护着一个Lock和一个counter变量,counter记录了acquire的次数,从而使得资源可以被多次acquire。直到一个线程所有的acquire都被release,其他的线程才能获得资源。上面的例子如果使用RLock代替Lock,则不会发生死锁
# Recursive-lock demo: the same acquire pattern as the deadlock example,
# but every acquire goes through ONE RLock, so re-acquisition by the
# owning thread just increments an internal counter instead of blocking.
from threading import Thread, Lock, RLock
import time

mutex = RLock()


class Mythread(Thread):
    """Deadlock-free variant of the two-lock demo using a single RLock.

    An RLock tracks its owner and an acquire counter; the lock is only
    released to other threads when every acquire has been matched by a
    release, which removes the circular-wait condition.
    """

    def run(self):
        self.f1()
        self.f2()

    def f1(self):
        mutex.acquire()
        print('\033[45m%s 抢到A锁\033[0m' % self.name)
        mutex.acquire()  # re-entrant: the owning thread may acquire again
        print('\033[44m%s 抢到B锁\033[0m' % self.name)
        mutex.release()
        mutex.release()

    def f2(self):
        mutex.acquire()
        print('\033[44m%s 抢到B锁\033[0m' % self.name)
        time.sleep(1)
        mutex.acquire()
        print('\033[45m%s 抢到A锁\033[0m' % self.name)
        mutex.release()
        mutex.release()


if __name__ == '__main__':
    for _ in range(20):
        t = Mythread()
        t.start()
from threading import Thread, current_thread, Semaphore
import time, random

# At most 5 threads may hold the semaphore at once; the other 15 queue.
sm = Semaphore(5)


def work():
    """Occupy one of the five semaphore slots for 1-3 seconds."""
    with sm:  # exception-safe acquire/release
        # .name replaces the deprecated getName() alias (same value).
        print('%s 上厕所' % current_thread().name)
        time.sleep(random.randint(1, 3))


if __name__ == '__main__':
    for _ in range(20):
        t = Thread(target=work)
        t.start()
进程池
import requests  # third-party: pip3 install requests
import os, time
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor


def get_page(url):
    """Fetch *url* in a pool worker; return {'url', 'text'} on HTTP 200.

    Returns None for any non-200 status (the callback must tolerate it).
    """
    print('<%s> get :%s' % (os.getpid(), url))
    respone = requests.get(url)
    if respone.status_code == 200:
        return {'url': url, 'text': respone.text}


def parse_page(obj):
    """Done-callback: unpack the finished future and log the page size.

    Runs in the submitting process, not the worker.
    """
    dic = obj.result()
    if dic is None:
        return  # fetch failed (non-200); nothing to record
    print('<%s> parse :%s' % (os.getpid(), dic['url']))
    time.sleep(0.5)  # simulate parsing work
    res = 'url:%s size:%s\n' % (dic['url'], len(dic['text']))
    with open('db.txt', 'a') as f:
        f.write(res)


if __name__ == '__main__':
    # p = Pool(4)
    p = ProcessPoolExecutor()
    urls = [
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
    ]
    for url in urls:
        # p.apply_async(get_page, args=(url,), callback=parse_page)
        p.submit(get_page, url).add_done_callback(parse_page)
    p.shutdown()  # wait for all submitted futures to finish
    print('主进程pid:', os.getpid())
线程池
import requests  # third-party: pip3 install requests
import os, time, threading
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor


def get_page(url):
    """Fetch *url* in a pool thread; return {'url', 'text'} on HTTP 200.

    Returns None for any non-200 status (the callback must tolerate it).
    """
    print('<%s> get :%s' % (threading.current_thread().name, url))
    respone = requests.get(url)
    if respone.status_code == 200:
        return {'url': url, 'text': respone.text}


def parse_page(obj):
    """Done-callback: unpack the finished future and log the page size."""
    dic = obj.result()
    if dic is None:
        return  # fetch failed (non-200); nothing to record
    print('<%s> parse :%s' % (threading.current_thread().name, dic['url']))
    time.sleep(0.5)  # simulate parsing work
    res = 'url:%s size:%s\n' % (dic['url'], len(dic['text']))
    with open('db.txt', 'a') as f:
        f.write(res)


if __name__ == '__main__':
    # p = Pool(4)
    p = ThreadPoolExecutor(3)  # at most 3 fetches in flight
    urls = [
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
        'http://www.baidu.com',
    ]
    for url in urls:
        # p.apply_async(get_page, args=(url,), callback=parse_page)
        p.submit(get_page, url).add_done_callback(parse_page)
    p.shutdown()  # wait for all submitted futures to finish
    print('主进程pid:', os.getpid())
标签:run 多次 活跃 mon UI 修改 add name .text
原文地址:http://www.cnblogs.com/jnbb/p/7497547.html