Skip to content

Instantly share code, notes, and snippets.

@MasanoriYamada
Forked from DmitryUlyanov/run_batch2.py
Created November 16, 2018 17:10
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save MasanoriYamada/963ad4ec59fbf9833eaa35f79967d528 to your computer and use it in GitHub Desktop.
from __future__ import print_function

import os
import threading
try:
    import Queue  # Python 2
except ImportError:
    import queue as Queue  # Python 3: the module was renamed to lowercase

from joblib import Parallel, delayed
# Fix print
_print = print
_rlock = threading.RLock()
def print(*args, **kwargs):
with _rlock:
_print(*args, **kwargs)
# Define number of GPUs available
N_GPU = 4
# Put indices in queue
q = Queue.Queue(maxsize=N_GPU)
for i in range(N_GPU):
q.put(i)
def runner(x):
gpu = q.get()
print (x, gpu)
# Put here your job cmd
cmd = "python main.py %s" % x
os.system("CUDA_VISIBLE_DEVICES=%d %s" % (gpu, cmd))
# return gpu id to queue
q.put(gpu)
# Change loop
Parallel(n_jobs=N_GPU, backend="threading")(delayed(runner)(i) for i in range(100))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment