Skip to content

Instantly share code, notes, and snippets.

@jwhitehorn
Created March 5, 2019 23:24
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save jwhitehorn/a0cfca1dc2f7a9d0d41c8473bad848d2 to your computer and use it in GitHub Desktop.
# Forward pass on the GPU: multiply the input batch through each layer's
# weight matrix with the OpenCL "multiply" kernel, chaining each layer's
# output buffer in as the next layer's input.
# NOTE(review): this fragment relies on `self` (opencl_context, opencl_queue,
# opencl_program), `layers`, and `input_data` from an enclosing scope not
# shown here.
# `input` renamed to avoid shadowing the builtin.
input_batch = numpy.array([data[0] for data in input_data])
mf = cl.mem_flags
# Device buffer holding the current layer's input activations.
a_g = cl.Buffer(self.opencl_context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                hostbuf=input_batch)
work_size = len(input_data)
res = None
for layer in layers:
    # [1, rows, cols] — layout expected by the kernel; assumes `layer`
    # is a 2-D weight matrix — TODO confirm against the kernel source.
    params = [1, layer.shape[0], layer.shape[1]]
    b_g = cl.Buffer(self.opencl_context, mf.READ_ONLY | mf.COPY_HOST_PTR,
                    hostbuf=layer)
    # Host-side output array; kernel result is copied back into it after
    # the final layer. dtype float64 per the original (float32 was tried
    # and commented out).
    res = numpy.empty([work_size, params[2]], dtype=numpy.float64)
    c_g = cl.Buffer(self.opencl_context, mf.COPY_HOST_PTR, hostbuf=res)
    params_buffer = cl.Buffer(self.opencl_context,
                              mf.READ_ONLY | mf.COPY_HOST_PTR,
                              hostbuf=numpy.array(params))
    # One work-item per input row.
    self.opencl_program.multiply(self.opencl_queue, (work_size,), None,
                                 params_buffer, a_g, b_g, c_g)
    # This layer's output becomes the next layer's input.
    a_g = c_g
# BUG FIX: original used the bare name `opencl_queue`, which is not in
# scope — the queue lives on `self`.
cl.enqueue_copy(self.opencl_queue, res, a_g)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment