mirror of
https://github.com/autistic-symposium/web3-starter-py.git
synced 2025-05-30 04:11:26 -04:00
add some definitions to readme
This commit is contained in:
parent 09ea5f12cf
commit 0a0d2707b2
12 changed files with 240 additions and 0 deletions
boilerplates-concurrency/README.md (new file)

## Concurrency and Parallelism in Python

### Threading

* Threading is a feature usually provided by the operating system.
* Threads are lighter than processes and share the same memory space.
* With threading, concurrency is achieved using multiple threads, but because of the GIL (Global Interpreter Lock) only one thread can execute Python bytecode at a time.
* If your code is I/O-heavy (like HTTP requests), multithreading will still probably speed it up, since the GIL is released while a thread waits on I/O (see the sketch below).
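
A minimal sketch of that I/O-bound case (the URL list is a placeholder, not from the original notes):

```python
import threading
import urllib.request

URLS = ['https://example.com'] * 5  # hypothetical work items

def fetch(url):
    # the GIL is released while the socket waits, so the requests overlap
    with urllib.request.urlopen(url) as response:
        print(url, response.status)

threads = [threading.Thread(target=fetch, args=(url,)) for url in URLS]
for t in threads:
    t.start()
for t in threads:
    t.join()
```
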
### Multiprocessing

* With multiprocessing, the original process is forked into multiple child processes, bypassing the GIL.
* Each child process has a copy of the entire program's memory.
* If your code is performing a CPU-bound task, such as decompressing gzip files, using the threading module will result in slower execution. For CPU-bound tasks and truly parallel execution, use the multiprocessing module (see the sketch below).
* Multiprocessing has a higher memory overhead than threading.
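
A minimal sketch of a CPU-bound task spread across processes (the summing workload is a stand-in for real work):

```python
import multiprocessing

def cpu_bound(n):
    # pure-Python arithmetic holds the GIL, so threads could not parallelize this
    return sum(i * i for i in range(n))

if __name__ == '__main__':
    with multiprocessing.Pool(4) as pool:
        # each input is handled by a separate process, so all cores can be busy
        print(pool.map(cpu_bound, [10_000_000] * 4))
```
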
### RQ: queueing jobs

* [RQ](https://python-rq.org/) is a simple but powerful library.
* You first enqueue a function and its arguments using the library. This pickles the function call representation, which is then appended to a Redis list (see the sketch below).
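
A minimal sketch, assuming a local Redis server, `pip install rq`, and a hypothetical `count_words` task that the worker can import:

```python
from redis import Redis
from rq import Queue

from my_module import count_words  # hypothetical importable task

q = Queue(connection=Redis())  # defaults to localhost:6379
job = q.enqueue(count_words, 'some text to process')  # pickled, pushed to a Redis list
print(job.id)  # runs once an `rq worker` process picks it up
```
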
### Celery: queueing jobs

* Celery is one of the most popular background job managers in the Python world.
* It is compatible with several message brokers, like RabbitMQ or Redis, and can act as both producer and consumer.
* It is an asynchronous task queue/job queue based on distributed message passing, focused on real-time operations but with support for scheduling as well (see the sketch below).
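
A minimal sketch (say, `tasks.py`), assuming `pip install celery` and a local Redis broker; the broker URL is an assumption:

```python
from celery import Celery

app = Celery('tasks', broker='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

# producer side: add.delay(2, 3) enqueues the call;
# a consumer started with `celery -A tasks worker` executes it
```
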
### concurrent.futures

* Using a concurrent.futures.ThreadPoolExecutor makes the Python threading example code almost identical to the multiprocessing module (see the sketch below).
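
A minimal sketch of that symmetry: switching the executor class is the only change needed to move from threads to processes:

```python
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == '__main__':
    with ThreadPoolExecutor(max_workers=4) as executor:
        print(list(executor.map(square, range(5))))
    with ProcessPoolExecutor(max_workers=4) as executor:  # same API, real parallelism
        print(list(executor.map(square, range(5))))
```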

boilerplates-concurrency/asyncio_simple_example.py (new file)

#!/usr/bin/env python3

import asyncio


async def delayed_hello():
    print('Hello ')
    await asyncio.sleep(1)
    print('World!')


# classic pre-3.7 pattern; on modern Python, asyncio.run(delayed_hello())
# does the same thing
loop = asyncio.get_event_loop()
loop.run_until_complete(delayed_hello())
loop.close()

boilerplates-concurrency/concurrent_future_example.py (new file)

#!/usr/bin/env python3

from time import sleep
from concurrent.futures import ThreadPoolExecutor


def return_after_5_secs(message):
    sleep(5)
    return message


pool = ThreadPoolExecutor(3)
future = pool.submit(return_after_5_secs, 'Future message')

print(future.done())  # False: the worker is still sleeping

sleep(5)
print(future.done())  # True: the task has finished
print(future.result())

boilerplates-concurrency/daemon_example.py (new file)

#!/usr/bin/env python3

import time
import sys
import multiprocessing


def daemon():
    p = multiprocessing.current_process()
    print('Starting: {}, {}'.format(p.name, p.pid))
    sys.stdout.flush()
    time.sleep(1)
    print('Exiting : {}, {}'.format(p.name, p.pid))
    sys.stdout.flush()


def non_daemon():
    p = multiprocessing.current_process()
    print('Starting: {}, {}'.format(p.name, p.pid))
    sys.stdout.flush()
    print('Exiting : {}, {}'.format(p.name, p.pid))
    sys.stdout.flush()


if __name__ == '__main__':
    d = multiprocessing.Process(name='daemon', target=daemon)
    d.daemon = True

    n = multiprocessing.Process(name='non-daemon', target=non_daemon)
    n.daemon = False

    # a daemon process is terminated when the main program exits,
    # so its 'Exiting' line may never be printed
    d.start()
    time.sleep(1)
    n.start()

boilerplates-concurrency/deadlock_example.py (new file)

#!/usr/bin/env python3

import threading

l = threading.Lock()
print("Before first lock acquire.")

l.acquire()
print("Before second lock acquire.")

# a plain Lock is not reentrant: this second acquire blocks forever,
# deliberately deadlocking the program
l.acquire()
print("Lock was acquired twice")  # never reached

boilerplates-concurrency/logging_example.py (new file)

#!/usr/bin/env python3

import sys
import logging
import multiprocessing


def worker():
    print('Doing some work...')
    sys.stdout.flush()


if __name__ == '__main__':
    # send the multiprocessing module's internal log messages to stderr
    multiprocessing.log_to_stderr(logging.DEBUG)
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join()

boilerplates-concurrency/multiprocessing_example.py (new file)

#!/usr/bin/env python3

import time
import random
import multiprocessing


def worker(n):
    sleep = random.randrange(1, 10)
    time.sleep(sleep)
    print("Worker {}: sleeping for {} seconds.".format(n, sleep))


# the guard is required on platforms that spawn (rather than fork) processes
if __name__ == '__main__':
    for i in range(5):
        p = multiprocessing.Process(target=worker, args=(i,))
        p.start()

boilerplates-concurrency/pool_example.py (new file)

#!/usr/bin/env python3

from multiprocessing import Pool


def f(x):
    return x * x


if __name__ == '__main__':
    p = Pool(5)
    print(p.map(f, [1, 2, 3]))  # distributes the inputs across the worker pool

boilerplates-concurrency/race_coditions.py (new file)

#!/usr/bin/env python3

import threading

x = 0
COUNT = 10000000


def foo():
    global x
    for i in range(COUNT):
        x += 1


def bar():
    global x
    for i in range(COUNT):
        x -= 1


t1 = threading.Thread(target=foo)
t2 = threading.Thread(target=bar)

t1.start()
t2.start()

t1.join()
t2.join()

# x += 1 is not atomic (read, add, write), so the two threads interleave
# and the final value is usually not 0
print(x)

boilerplates-concurrency/thread_example.py (new file)

#!/usr/bin/env python3

import time
import random
import threading


def worker(n):
    sleep = random.randrange(1, 10)
    time.sleep(sleep)
    print("Worker {} from {}: sleeping for {} seconds.".format(n, threading.get_ident(), sleep))


for i in range(5):
    t = threading.Thread(target=worker, args=(i,))
    t.start()

boilerplates-concurrency/threadpool_example.py (new file)

#!/usr/bin/env python3

from time import sleep
from concurrent.futures import ThreadPoolExecutor


def return_after_5_secs(message):
    sleep(5)
    return message


pool = ThreadPoolExecutor(3)

future = pool.submit(return_after_5_secs, "hello")

print(future.done())  # False while the task is still running
sleep(5)
print(future.done())  # True once the 5-second sleep has finished
print(future.result())

boilerplates-concurrency/threads_with_queues.py (new file)

#!/usr/bin/env python3

from queue import Queue
from threading import Thread


NUM_WORKERS = 4
task_queue = Queue()


def run_function(item):
    # placeholder task body so the example is self-contained
    print('Processing {}'.format(item))


def worker():
    while True:
        item = task_queue.get()
        run_function(item)
        task_queue.task_done()


# daemon threads let the program exit once the queue is drained
threads = [Thread(target=worker, daemon=True) for _ in range(NUM_WORKERS)]

for item in range(NUM_WORKERS * 2):
    task_queue.put(item)

for thread in threads:
    thread.start()

task_queue.join()  # blocks until task_done() has been called for every item