from concurrent import futures  # pip install futures (backport; part of the stdlib on Python 3)
from cloudfiles.connection import ConnectionPool


def get_container(name, conn):
    # NOTE: ConnectionPool ensures that `conn` is owned by one thread
    # at a time; dict get/set are themselves atomic in CPython for
    # keys that are tuples of strings and ints
    try:
        return get_container.cache[name, id(conn)]
    except KeyError:
        ret = get_container.cache[name, id(conn)] = conn.create_container(name)
        return ret
get_container.cache = {}


def uploader(connection_pool, filename):
    # Borrow a connection from the pool, upload one file, then return the
    # connection so another worker thread can reuse it.
    conn = connection_pool.get()
    try:
        obj = get_container('test', conn).create_object(filename)
        obj.load_from_filename(filename)
    finally:
        connection_pool.put(conn)


nconnections = 16
username = '---'
api_key = '---'
connection_pool = ConnectionPool(username, api_key, poolsize=nconnections)

with futures.ThreadPoolExecutor(max_workers=nconnections) as executor:
    jobs = [executor.submit(uploader, connection_pool, f)
            for f in get_filenames()]

    for future in futures.as_completed(jobs):
        if future.exception() is not None:
            print(future.exception())  # error
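
The listing relies on a get_filenames() helper that is not defined in this snippet; it only needs to yield the paths of the files to upload. A minimal sketch, assuming the files simply live in a local directory (the `directory` argument here is hypothetical):

import os

def get_filenames(directory='.'):
    # Hypothetical helper: yield the regular files found directly in `directory`.
    for name in sorted(os.listdir(directory)):
        path = os.path.join(directory, name)
        if os.path.isfile(path):
            yield path

Each yielded path doubles as the object name passed to create_object(), so adjust the naming if the container should hold something other than local paths.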