// TODO: add all tf functions
// TODO: sort by name

// Base class for cases where a TILE implementation isn't possible/useful: fall back to CPU operations.

import numpy as np
import plaidml
from plaidml import tile as ptile
from keras import backend as K  # assumes the PlaidML Keras backend has been installed/selected

class CPU_operation(ptile.Operation):
    # Base class: evaluate the inputs on the host, run a NumPy computation
    # (cpu_operation) and copy the result back into a device tensor.
    def __init__(self, inputs, outputs, name=None, side_effects=None):
        self._inputs = inputs
        super(CPU_operation, self).__init__(None, [], outputs, name=name, side_effects=side_effects)

    def cpu_operation(self, *args):
        # Subclasses implement the actual NumPy computation here.
        raise NotImplementedError()

    def bind(self, bindings):
        # Pull the input values to the host.
        values = [K.get_value(x) for x in self._inputs]
        results = self.cpu_operation(*values)
        ret = dict()
        for data, output in zip(results, self.outputs.items()):
            out_name, out_val = output
            tensor = plaidml.Tensor(bindings.dev, plaidml.Shape(
                bindings.ctx,
                out_val.shape.dtype,
                *out_val.shape.dims)
            )
            # Copy the NumPy result into the freshly allocated device tensor.
            with tensor.mmap_discard(bindings.ctx) as view:
                view.copy_from_ndarray(data)
                view.writeback()
            ret[out_name] = tensor
        return ret
  • tf.reshape
    SAME

  • tf.norm
    Defaults to the Euclidean norm: e_norm = lambda x, a=None: K.sqrt(K.sum(K.square(x), axis=a)) (slightly different results... rounding error?). TODO: investigate the error and write a tf-compatible function; a rough sketch follows below.
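
    A rough, untested sketch of a tf.norm-compatible helper built only from Keras backend ops (the function name and the limited ord handling below are my own, not an existing API):

    def norm(x, ord='euclidean', axis=None, keepdims=False):
        # 'euclidean' / 2: square root of the sum of squares (tf.norm's default)
        if ord in ('euclidean', 2):
            return K.sqrt(K.sum(K.square(x), axis=axis, keepdims=keepdims))
        # 1: sum of absolute values
        if ord == 1:
            return K.sum(K.abs(x), axis=axis, keepdims=keepdims)
        # inf: maximum absolute value
        if ord == np.inf:
            return K.max(K.abs(x), axis=axis, keepdims=keepdims)
        raise NotImplementedError("ord=%r is not supported" % (ord,))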

  • tf.matrix_inverse()
    Naive CPU impl, but hey, it works. A usage sketch follows after the code.

class MatrixInverse(CPU_operation):
    # CPU fallback: pulls the input to the host and inverts it with np.linalg.inv.
    # TODO: mirror the tf version's params https://www.tensorflow.org/api_docs/python/tf/linalg/inv
    def __init__(self, x, name=None):
        super(MatrixInverse, self).__init__([x], (('O', x.shape),), name=name)

    def cpu_operation(self, x):
        return (np.linalg.inv(x),)
matrix_inverse = MatrixInverse.function
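
A hypothetical usage sketch (untested; assumes the PlaidML Keras backend is active): the value is pulled to the host, inverted with np.linalg.inv and written back to the device.

x = K.variable(np.array([[4., 7.], [2., 6.]]))
x_inv = matrix_inverse(x)  # built via MatrixInverse.function above
print(K.eval(x_inv))
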
  • tf.matmul
    K.dot / K.batch_dot (that is, as soon as it is "fixed" in PlaidML). A minimal sketch follows below.
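
    A minimal sketch of the mapping (example tensors are just for illustration):

    # 2-D case: tf.matmul(a, b) -> K.dot(a, b)
    a = K.variable(np.random.rand(3, 4))
    b = K.variable(np.random.rand(4, 5))
    c = K.dot(a, b)            # shape (3, 5)
    # Batched case: tf.matmul on (B, m, k) x (B, k, n) -> K.batch_dot
    a3 = K.variable(np.random.rand(2, 3, 4))
    b3 = K.variable(np.random.rand(2, 4, 5))
    c3 = K.batch_dot(a3, b3)   # shape (2, 3, 5)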

  • tf.reverse
    SAME

  • tf.transpose
    K.permute_dimensions(A, (0, 2, 1)); the pattern tuple plays the role of tf.transpose's perm argument. See the example below.
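
    A small illustration of the mapping (example tensors are just for illustration):

    x = K.variable(np.random.rand(2, 3, 4))
    # tf.transpose(x, perm=[0, 2, 1]) -> swap the last two axes
    y = K.permute_dimensions(x, (0, 2, 1))              # shape (2, 4, 3)
    # tf.transpose on a 2-D tensor -> K.transpose(x) or K.permute_dimensions(x, (1, 0))
    z = K.transpose(K.variable(np.random.rand(3, 5)))   # shape (5, 3)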

  • tf.gather_nd
    CPU impl. Currently only works with data.shape == (B, X, Y) and ind.shape == (B, X*Y, 2). A usage sketch follows after the code.

    class Gather_nd(CPU_operation):
        # TODO: mirror tf versions params https://riptutorial.com/de/tensorflow/example/29069/wie-benutze-ich-tf-gather-nd
        def __init__(self, data, ind, name=None):
            # TODO: calculate shape properly
            shape = ptile.Shape(data.shape.dtype, (data.shape.dims[0], np.prod(data.shape.dims[1:])))
            #shape = ptile.Shape(data.shape.dtype, (ind.shape.dims[0], np.prod(data.shape.dims[0:]) ) )
            super(Gather_nd, self).__init__([data, ind], (('O', shape),), name=name)
    
        def cpu_operation(self, data, ind):
            # TODO: fix for all axes and shapes
            # TODO: clip ?
            #ind = ind.reshape((np.prod(ind.shape[:2]),2))
            # Per batch b, gather data[b, x, y] for every (x, y) pair in ind[b].
            # TODO: this is stupid.. find a proper NumPy function
            d = np.array([ data[b, ind[b, :, 0], ind[b, :, 1]] for b in range(data.shape[0]) ])
            return (d,)
    gather_nd = Gather_nd.function
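
    A hypothetical usage sketch (untested; shapes follow the restriction above, so ind provides X*Y index pairs per batch, matching the (B, X*Y) output shape declared in __init__):

    data = K.variable(np.arange(8, dtype='float32').reshape((2, 2, 2)))  # (B, X, Y) = (2, 2, 2)
    ind = K.variable(np.array([[[0, 0], [0, 1], [1, 0], [1, 1]],
                               [[1, 1], [1, 0], [0, 1], [0, 0]]]), dtype='int32')  # (2, 4, 2)
    out = gather_nd(data, ind)  # out[b, i] == data[b, ind[b, i, 0], ind[b, i, 1]]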
   
  • tf.clip_by_value
    K.clip

  • tf.Tensor.set_shape
    K.reshape, although this is not as performant: set_shape is just a hint telling TF about the shape, whereas reshape actually reshapes. See the snippet below.
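
    A small illustration of the difference (hypothetical shapes):

    x = K.placeholder(shape=(None, 4))
    # tf: x.set_shape((None, 4)) only annotates static shape info; no op is added.
    # Keras backend: K.reshape inserts a real reshape op into the graph.
    y = K.reshape(x, (-1, 2, 2))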

  • tf.unsqueeze (i.e. tf.expand_dims)
    K.expand_dims
