#!/usr/bin/env python# Author:Zhangmingdaimport queue,threadingfrom multiprocessing import Process,Queuedef f(q): q.put([234,None,'Hello'])if __name__ == '__main__': q = queue.Queue() #使用线程队列运行报错TypeError: can't pickle _thread.lock objects p = Process(target=f,args=(q,)) #启动一个子进程 # p = threading.Thread(target=f,) #启动线程直接共享主进程内存 p.start() print(q.get()) p.join()
C:\Users\Administrator\Desktop\Python3_study\venv\Scripts\python.exe C:/Users/Administrator/Desktop/Python3_study/day10/进程间通信?.pyTraceback (most recent call last): File "C:/Users/Administrator/Desktop/Python3_study/day10/进程间通信?.py", line 13, inp.start() File "C:\Program Files\Python36\lib\multiprocessing\process.py", line 105, in start self._popen = self._Popen(self) File "C:\Program Files\Python36\lib\multiprocessing\context.py", line 223, in _Popen return _default_context.get_context().Process._Popen(process_obj) File "C:\Program Files\Python36\lib\multiprocessing\context.py", line 322, in _Popen return Popen(process_obj) File "C:\Program Files\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__ reduction.dump(process_obj, to_child) File "C:\Program Files\Python36\lib\multiprocessing\reduction.py", line 60, in dump ForkingPickler(file, protocol).dump(obj)TypeError: can't pickle _thread.lock objects
#!/usr/bin/env python
# Author: Zhangmingda
# Demo: passing data from a child process back to the parent with a
# multiprocessing.Queue (picklable, unlike the thread-oriented queue.Queue,
# which fails here with: TypeError: can't pickle _thread.lock objects).
import queue,threading
from multiprocessing import Process,Queue


def f(q):
    """Put a sample payload onto the shared queue for the parent to read."""
    q.put([234,None,'Hello'])


if __name__ == '__main__':
    # A process queue: safe to hand across the process boundary.
    ipc_queue = Queue()
    worker = Process(target=f, args=(ipc_queue,))
    worker.start()
    # Blocks until the child has put its payload.
    print(ipc_queue.get())
    worker.join()
进程间通讯原理:multiprocessing.Queue 会把父进程中的 q 克隆(pickle 序列化)一份交给子进程;子进程向队列 put 数据时同样会被序列化,父进程 get 时再反序列化取出。所以父进程能够得到子进程写入队列的数据,看起来就像两个进程共享了同一个队列。
C:\Users\Administrator\Desktop\Python3_study\venv\Scripts\python.exe C:/Users/Administrator/Desktop/Python3_study/day10/进程间通信?.py[234, None, 'Hello']Process finished with exit code 0
#!/usr/bin/env python
# Author: Zhangmingda
# Demo: sharing a dict and a list across processes via multiprocessing.Manager.
from multiprocessing import Process,Manager
import os


def f(d,l):
    """Record this worker's PID in the shared dict and shared list."""
    pid = os.getpid()
    d[pid] = pid        # write an entry into the shared dict
    l.append(pid)       # append to the shared list


if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(5))
        print('初始化一个进程间可以共享的字典和列表:',d,l)
        workers = []  # keep the Process handles so we can join them
        for idx in range(10):  # launch 10 worker processes
            proc = Process(target=f, args=(d, l))
            proc.start()
            print('启动第%d个进程'%(idx+1))
            workers.append(proc)
        for proc in workers:
            proc.join()  # wait for every worker to finish
        print(d,l)
C:\Users\Administrator\Desktop\Python3_study\venv\Scripts\python.exe C:/Users/Administrator/Desktop/Python3_study/day10/进程间数据共享manager.py初始化一个进程间可以共享的字典和列表: {} [0, 1, 2, 3, 4]启动第1个进程启动第2个进程启动第3个进程启动第4个进程启动第5个进程启动第6个进程启动第7个进程启动第8个进程启动第9个进程启动第10个进程{ 6632: 6632, 5300: 5300, 6044: 6044, 2212: 2212, 3828: 3828, 5184: 5184, 5412: 5412, 7000: 7000, 5768: 5768, 5784: 5784} [0, 1, 2, 3, 4, 6632, 5300, 6044, 2212, 3828, 5184, 5412, 7000, 5768, 5784]Process finished with exit code 0
(实际进程间还是copy了同样的数据最后汇总,Manager()自己内部有锁)
#!/usr/bin/env python
# Author: Zhangmingda
# Demo: multiprocessing.Pool limits how many worker processes run at once;
# apply_async + callback runs the callback in the PARENT process with the
# worker's return value.
from multiprocessing import Pool
import time,os


def Foo(i):
    """Worker task: pause, report the worker PID, return i + 100.

    The return value is handed to the callback registered with apply_async.
    """
    time.sleep(1)
    print('in process',os.getpid())
    return i + 100


def Bar(arg):
    """Callback: runs in the parent process with Foo's return value."""
    print('-->exec done:',arg,os.getpid())


if __name__ == '__main__':
    pool = Pool(processes=3)  # at most 3 workers run simultaneously
    print('主进程ID:',os.getpid())
    for task_no in range(10):
        # pool.apply(...) would run tasks serially; apply_async is parallel.
        pool.apply_async(func=Foo, args=(task_no,), callback=Bar)
    print('END。。。。。')
    pool.close()
    # Without join() the parent would exit before the workers finish.
    pool.join()
C:\Users\Administrator\Desktop\Python3_study\venv\Scripts\python.exe C:/Users/Administrator/Desktop/Python3_study/day10/进程池(限制同时运行的进程数量).py主进程ID: 5508END。。。。。in process 6856-->exec done: 100 5508in process 6620-->exec done: 101 5508in process 7988-->exec done: 102 5508in process 6856-->exec done: 103 5508in process 6620-->exec done: 104 5508in process 7988-->exec done: 105 5508in process 6856-->exec done: 106 5508in process 6620-->exec done: 107 5508in process 7988-->exec done: 108 5508in process 6856-->exec done: 109 5508Process finished with exit code 0