Davide Libenzi (dlibenzi) public gists

Traceback (most recent call last):
  File "/usr/local/google/home/dlibenzi/tmp/TRASH/grad_grad.py", line 17, in <module>
    (dw * dw).mean().backward()
  File "/usr/local/google/home/dlibenzi/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/tensor.py", line 185, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "/usr/local/google/home/dlibenzi/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/__init__.py", line 127, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: /usr/local/google/home/dlibenzi/google-git/pytorch/xla/third_party/tensorflow/bazel-tensorflow/tensorflow/compiler/xla/xla_client/debug_macros.h:27 : Check failed: status.status() == ::tensorflow::Status::OK() (Invalid argument: conv_backward_input: Size of out_backprop doesn't match computed: actual = 3, computed = 4 spatial_dim: 2 input: 32 filter: 16 output: 3 stride: 1 dilation: 2 vs. OK)
*** Begin stack trace ***
  tensorflow::CurrentStackTrace[abi:c
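
The script that produced this trace (grad_grad.py) is not included in the preview. Below is a minimal sketch of the kind of double backward through a dilated convolution that matches the shapes in the error message (input 32, filter 16, stride 1, dilation 2); the tensor layout and variable names are assumptions, not the original script.

import torch
import torch.nn.functional as F
import torch_xla.core.xla_model as xm

device = xm.xla_device()
x = torch.randn(1, 1, 32, 32, device=device, requires_grad=True)
w = torch.randn(1, 1, 16, 16, device=device, requires_grad=True)

out = F.conv2d(x, w, stride=1, dilation=2)
loss = out.mean()
# Keep the graph so the weight gradient itself can be differentiated again.
dw, = torch.autograd.grad(loss, w, create_graph=True)
# Second-order backward; this is the call that hits the conv_backward_input shape check above.
(dw * dw).mean().backward()
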
>>>>>> This is the difference (GOOD - XLA)
Difference Tensor:
(1,1,.,.) =
0.0000 0.0000 0.0000 -0.9593 -0.3904 0.0000 0.0000 0.0000
-0.9408 0.0000 -0.9346 -1.7807 0.0000 -0.5677 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 -2.8547 0.0000
-0.8860 -0.5832 0.0000 -0.8090 -1.1559 -0.9040 0.0000 0.0000
0.0000 0.0000 0.0000 -2.8392 -0.7890 0.0000 0.0000 -0.5895

import sys
import torch
import torch_xla
import torch_xla.core.functions as xf
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp

def big_mm(w, x, split=1):
  ordinal = xm.get_ordinal()
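
The body of big_mm is cut off after its first line, and the xm.get_ordinal() call suggests the real version coordinates work across TPU cores. As a rough single-device sketch of the chunked matrix multiply the signature hints at (big_mm_sketch and its chunking strategy are assumptions, not the author's implementation):

import torch

def big_mm_sketch(w, x, split=1):
  # Assumed behavior: compute w @ x in `split` column chunks of x to bound peak memory.
  chunks = x.split(max(1, x.size(1) // split), dim=1)
  return torch.cat([w @ c for c in chunks], dim=1)

w = torch.randn(256, 512)
x = torch.randn(512, 1024)
print(big_mm_sketch(w, x, split=4).shape)  # torch.Size([256, 1024])
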
import torch
import torch_xla
import torch_xla.core.xla_builder as xb
import torch_xla.core.xla_op_registry as xor
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
def _split_indices(index):
  ishape = index.shape()

import torch
import torch_xla
import torch_xla.core.xla_builder as xb
import torch_xla.core.xla_op_registry as xor
import torch_xla.core.xla_model as xm
device = xm.xla_device()
tt = torch.randn(2, 2)
print(xb.tensor_shape(tt))

import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp

SERIAL_EXEC = xmp.MpSerialExecutor()

def _mp_fn(_):
  def _serial_fn():
    import time
    print(f'rank {xm.get_ordinal()} start at {time.time()}')
    time.sleep(5)
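
The preview stops inside _serial_fn. Here is a self-contained sketch of how the serialized section would typically be driven; the SERIAL_EXEC.run() call, the rendezvous tag, and the xmp.spawn launcher are assumptions about how the truncated gist continues.

import time
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp

SERIAL_EXEC = xmp.MpSerialExecutor()

def _mp_fn(index):
  def _serial_fn():
    # Only one process at a time gets to run this body.
    print(f'rank {xm.get_ordinal()} start at {time.time()}')
    time.sleep(5)

  # MpSerialExecutor.run() serializes the callable across all spawned processes.
  SERIAL_EXEC.run(_serial_fn)
  xm.rendezvous('serial_done')

if __name__ == '__main__':
  xmp.spawn(_mp_fn, args=())
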
import torch
import torch.nn as nn

class XlaLSTM(nn.Module):
  def __init__(self, input_sz, hidden_sz, batch_first=False, pad_value=0):
    super(XlaLSTM, self).__init__()
    self.input_sz = input_sz
    self.hidden_size = hidden_sz
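
The preview cuts off partway through __init__, so the class above is incomplete. For orientation only, this is how the constructor shown would be instantiated on an XLA device; the sizes are arbitrary and the rest of the class is assumed to exist in the full gist.

import torch_xla.core.xla_model as xm

device = xm.xla_device()
# Arbitrary sizes; batch_first and pad_value come straight from the signature above.
lstm = XlaLSTM(input_sz=128, hidden_sz=256, batch_first=True, pad_value=0).to(device)
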
from PIL import Image
import numpy as np
import hashlib
import os
import sys
import torch
import torch_xla.utils.tf_record_reader as tfrr
a = """
image/class/label tensor([82])
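
The snippet is cut off inside the expected-output string, which appears to list feature names such as image/class/label. Below is a rough sketch of reading a TFRecord file with this utility and printing each example's fields; the file path is made up, and the TfRecordReader constructor arguments and read_example() loop are assumptions rather than lines taken from the gist.

import torch_xla.utils.tf_record_reader as tfrr

# Hypothetical path; the gist does not show where its records come from.
path = '/tmp/imagenet_validation-00000-of-00001'
reader = tfrr.TfRecordReader(path, compression='')
while True:
  example = reader.read_example()
  if example is None:
    break
  # Each example maps feature names (e.g. 'image/class/label') to tensors.
  for name, value in example.items():
    print(name, value)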