============================= test session starts ==============================
platform darwin -- Python 3.6.7, pytest-4.0.2, py-1.7.0, pluggy-0.8.0
rootdir: /Users/ericmjl/github/software/autograd-sparse, inifile:
collected 3 items / 2 deselected

tests/test_sparse.py F                                                   [100%]

=================================== FAILURES ===================================
_____________________________ test_sparse_dot_grad _____________________________

eye = <5x5 sparse matrix of type '<class 'numpy.float64'>'
        with 5 stored elements in Compressed Sparse Row format>

    @pytest.mark.test
    @pytest.mark.sparse
    def test_sparse_dot_grad(eye):
        dense = np.random.random(size=(1, 5))
        def fun(x):
            return sp.dot(dense, x)
>       check_grads(fun)(eye)

tests/test_sparse.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/wrap_util.py:20: in nary_f
    return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/test_util.py:61: in check_grads
    check_grads(grad_f, (0, 1), modes, order=order-1)(x, v)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/wrap_util.py:20: in nary_f
    return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/test_util.py:63: in check_grads
    check_vjp(f, x)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/test_util.py:30: in check_vjp
    vjp_y = x_vs.covector(vjp(y_vs.covector(y_v)))
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/core.py:14: in vjp
    def vjp(g): return backward_pass(g, end_node)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/core.py:21: in backward_pass
    ingrads = node.vjp(outgrad[0])
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/core.py:61: in <lambda>
    return lambda g: (vjp(g),)
autograd_sparse/sparse_vjps.py:33: in <lambda>
    return lambda g: sp.dot(sparse.T, g)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/autograd/tracer.py:48: in f_wrapped
    return f_raw(*args, **kwargs)
../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/sparse/coo/common.py:273: in dot
    return tensordot(a, b, axes=(a_axis, b_axis))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

a = array([[0.24730147],
       [0.55249291],
       [0.13048252],
       [0.40001993],
       [0.25248632]])
b = array([[-0.28861048, -0.05129975, -0.2414237 ,  0.28538278,  0.80744757]])
axes = (-1, -2)

    def tensordot(a, b, axes=2):
        """
        Perform the equivalent of :obj:`numpy.tensordot`.

        Parameters
        ----------
        a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix]
            The arrays to perform the :code:`tensordot` operation on.
        axes : tuple[Union[int, tuple[int], Union[int, tuple[int]], optional
            The axes to match when performing the sum.

        Returns
        -------
        Union[COO, numpy.ndarray]
            The result of the operation.

        Raises
        ------
        ValueError
            If all arguments don't have zero fill-values.

        See Also
        --------
        numpy.tensordot : NumPy equivalent function
        """
        # Much of this is stolen from numpy/core/numeric.py::tensordot
        # Please see license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
        check_zero_fill_value(a, b)

        if scipy.sparse.issparse(a):
            a = asCOO(a)
        if scipy.sparse.issparse(b):
            b = asCOO(b)

        try:
            iter(axes)
        except TypeError:
            axes_a = list(range(-axes, 0))
            axes_b = list(range(0, axes))
        else:
            axes_a, axes_b = axes
        try:
            na = len(axes_a)
            axes_a = list(axes_a)
        except TypeError:
            axes_a = [axes_a]
            na = 1
        try:
            nb = len(axes_b)
            axes_b = list(axes_b)
        except TypeError:
            axes_b = [axes_b]
            nb = 1

        # a, b = asarray(a), asarray(b)  # <--- modified
        as_ = a.shape
        nda = a.ndim
        bs = b.shape
        ndb = b.ndim
        equal = True
        if na != nb:
            equal = False
        else:
            for k in range(na):
                if as_[axes_a[k]] != bs[axes_b[k]]:
                    equal = False
                    break
                if axes_a[k] < 0:
                    axes_a[k] += nda
                if axes_b[k] < 0:
                    axes_b[k] += ndb
        if not equal:
            raise ValueError("shape-mismatch for sum")

        # Move the axes to sum over to the end of "a"
        # and to the front of "b"
        notin = [k for k in range(nda) if k not in axes_a]
        newaxes_a = notin + axes_a
        N2 = 1
        for axis in axes_a:
            N2 *= as_[axis]
        newshape_a = (-1, N2)
        olda = [as_[axis] for axis in notin]

        notin = [k for k in range(ndb) if k not in axes_b]
        newaxes_b = axes_b + notin
        N2 = 1
        for axis in axes_b:
            N2 *= bs[axis]
        newshape_b = (N2, -1)
        oldb = [bs[axis] for axis in notin]

        at = a.transpose(newaxes_a).reshape(newshape_a)
        bt = b.transpose(newaxes_b).reshape(newshape_b)

        res = _dot(at, bt)

>       return res.reshape(olda + oldb)
E       AttributeError: 'NoneType' object has no attribute 'reshape'

../../../anaconda/envs/autograd-sparse/lib/python3.6/site-packages/sparse/coo/common.py:155: AttributeError
==================== 1 failed, 2 deselected in 1.33 seconds ====================
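Reading the traceback: the test calls sp.dot(dense, x) with the sparse matrix as the *second* argument, so autograd invokes _dot_vjp_1 (below) with its `sparse` parameter actually bound to the dense (1, 5) array. The fall-through branch at sparse_vjps.py:33 then evaluates sp.dot(sparse.T, g) on two dense ndarrays, and, in the pydata/sparse version shown in the traceback, tensordot's internal _dot has no dense-dense path, so res is None and the final reshape raises. A minimal sketch that should reproduce the failing frame in isolation (assuming that same sparse version):

import numpy as np
import sparse

a = np.random.random((5, 1))  # matches `a` in the traceback
b = np.random.random((1, 5))  # matches `b` in the traceback

# sparse.dot -> tensordot -> _dot(at, bt), which returns None for two
# dense ndarrays here, so the trailing reshape raises
# AttributeError: 'NoneType' object has no attribute 'reshape'.
sparse.dot(a, b)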
autograd_sparse/sparse_vjps.py:
import autograd.numpy as anp
from autograd.extend import defvjp

import autograd_sparse.sparse_wrapper as sp


def _dot_vjp_0(ans, sparse, dense):
    # VJP of sp.dot with respect to argument 0. The parameter names
    # assume the sparse operand is passed first.
    if max(anp.ndim(sparse), anp.ndim(dense)) > 2:
        raise NotImplementedError("Current dot vjps only support ndim <= 2.")
    if anp.ndim(sparse) == 0:
        return lambda g: anp.sum(dense * g)
    if anp.ndim(sparse) == 1 and anp.ndim(dense) == 1:
        return lambda g: g * dense
    if anp.ndim(sparse) == 2 and anp.ndim(dense) == 1:
        return lambda g: g[:, None] * dense
    if anp.ndim(sparse) == 1 and anp.ndim(dense) == 2:
        return lambda g: sp.dot(dense, g)
    return lambda g: sp.dot(g, dense.T)


def _dot_vjp_1(ans, sparse, dense):
    # VJP of sp.dot with respect to argument 1.
    if max(anp.ndim(sparse), anp.ndim(dense)) > 2:
        raise NotImplementedError("Current dot vjps only support ndim <= 2.")
    if anp.ndim(dense) == 0:
        return lambda g: anp.sum(sparse * g)
    if anp.ndim(sparse) == 1 and anp.ndim(dense) == 1:
        return lambda g: g * sparse
    if anp.ndim(sparse) == 2 and anp.ndim(dense) == 1:
        return lambda g: sp.dot(g, sparse)
    if anp.ndim(sparse) == 1 and anp.ndim(dense) == 2:
        return lambda g: sparse[:, None] * g
    return lambda g: sp.dot(sparse.T, g)  # sparse_vjps.py:33 in the traceback


defvjp(sp.dot, _dot_vjp_0, _dot_vjp_1)
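For reference, the defvjp registration pattern used above boils down to the following self-contained sketch (a hypothetical toy primitive, not part of autograd_sparse), showing how autograd consumes the two VJP factories:

import autograd.numpy as anp
from autograd import grad
from autograd.extend import primitive, defvjp

@primitive
def toy_dot(a, b):
    # A primitive is opaque to autograd's tracer; its gradients come
    # only from the VJPs registered below.
    return anp.dot(a, b)

# One VJP factory per positional argument: each receives (ans, *args)
# and returns a function mapping the output cotangent g to the
# cotangent of that argument.
defvjp(
    toy_dot,
    lambda ans, a, b: lambda g: g * b,  # d/da for scalar inputs
    lambda ans, a, b: lambda g: g * a,  # d/db for scalar inputs
)

print(grad(toy_dot)(2.0, 3.0))  # prints 3.0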