@fginter
Created May 19, 2014 14:05
test of shared memory array / numpy integration
import multiprocessing
import numpy

# Demonstrates shared-memory numpy arrays with no synchronization between processes.

def increment(s_arr):
    # A function for a single process: increment every element in s_arr by 1.0.
    # s_arr is a shared array from multiprocessing; wrap it in a numpy array first.
    # frombuffer() returns a view of the shared buffer, not a copy, so writes
    # made here are visible to every other process.
    np_array = numpy.frombuffer(s_arr, numpy.double)
    for idx in range(len(np_array)):
        np_array[idx] += 1.0

# Test this thing. The __main__ guard is required so child processes can be
# spawned safely on platforms that do not fork.
if __name__ == '__main__':
    arr = numpy.zeros(10, numpy.double)         # numpy array (presumably we'll start with that)
    s_arr = multiprocessing.RawArray('d', arr)  # shared-memory *copy* of arr; arr is useless after this
    arr = None

    # Launch a bunch of processes; each will independently increment the array.
    processes = []
    for x in range(100):
        p = multiprocessing.Process(target=increment, args=(s_arr,))
        processes.append(p)  # remember it
        p.start()            # ...and run!

    # Wait for every process to end.
    for p in processes:
        p.join()

    # If all went well, the array should hold values near 100 now -- not exactly
    # 100, because RawArray provides no locking and the unsynchronized
    # read-modify-write increments can race.
    arr = numpy.frombuffer(s_arr, numpy.double)  # back to a numpy array so we can print easily
    print(arr)
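
Why this works: numpy.frombuffer wraps the RawArray's buffer as a view rather than copying it, so every process writes into the one shared block. A quick single-process check of the view behavior (a minimal sketch; the names s and view are illustrative):

import multiprocessing
import numpy

s = multiprocessing.RawArray('d', 3)      # three doubles, zero-initialized
view = numpy.frombuffer(s, numpy.double)  # view onto the shared buffer, no copy
view[0] = 42.0
print(s[0])                # 42.0 -- the write went through the view into the RawArray
print(view.base is None)   # False -- the numpy array does not own its data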
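The race disappears if the increments are serialized. A minimal sketch of a synchronized variant, assuming a plain multiprocessing.Lock passed alongside the RawArray (the names lock and increment_locked are illustrative, not part of the original gist):

import multiprocessing
import numpy

def increment_locked(s_arr, lock):
    # Same worker as above, but the read-modify-write loop is guarded by a
    # lock, so concurrent increments can no longer be lost.
    np_array = numpy.frombuffer(s_arr, numpy.double)
    with lock:
        for idx in range(len(np_array)):
            np_array[idx] += 1.0

if __name__ == '__main__':
    s_arr = multiprocessing.RawArray('d', 10)  # zero-initialized
    lock = multiprocessing.Lock()
    processes = [multiprocessing.Process(target=increment_locked, args=(s_arr, lock))
                 for _ in range(100)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    print(numpy.frombuffer(s_arr, numpy.double))  # now exactly 100.0 everywhere

The standard library also packages this idea: multiprocessing.Array('d', 10) bundles a RawArray with a lock, though frombuffer then needs the underlying buffer via get_obj(), and the lock via get_lock().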