Skip to content

Instantly share code, notes, and snippets.

James Reed jamesr66a

  • Facebook
  • Menlo Park, CA
View GitHub Profile
View gist:aedd204f0c876ec1b4f5486d96d563fc
RuntimeError:
attribute lookup is not defined on python value of type 'Sequential':
@torch.jit.script_method
def forward(self, x):
return self.layers.forward(x)
~~~~~~~~~~~~~~~~~~~ <--- HERE
View torchscript.py
def forward(self,
x: Tensor,
w_out: Tensor,
b_out: Tensor) -> Tensor:
h = torch.zeros([1024], dtype=6, layout=0, device=torch.device("cpu"))
c = torch.zeros([1024], dtype=6, layout=0, device=torch.device("cpu"))
c0, h0 = c, h
for t in range(torch.size(x, 0)):
_0 = torch.select(x, 0, t)
_1 = (h0, c0)
View gist:21fc5f0d407ac80d779d661560467710
from __future__ import print_function
def tf__RNN(x, weights, biases):
try:
with ag__.function_scope('RNN'):
do_return = False
retval_ = None
x = ag__.converted_call('unstack', tf, ag__.ConversionOptions(recursive=True, verbose=0, strip_decorators=(ag__.convert, ag__.do_not_convert, ag__.converted_call), force_conversion=False, optional_features=ag__.Feature.ALL, internal_convert_user_code=True), (x, timesteps, 1), {})
lstm_cell = ag__.converted_call(LSTMCell, None, ag__.ConversionOptions(recursive=True, verbose=0, strip_decorators=(ag__.convert, ag__.do_not_convert, ag__.converted_call), force_conversion=False, optional_features=ag__.Feature.ALL, internal_convert_user_code=True), (num_hidden,), {'forget_bias': 1.0})
outputs, states = ag__.converted_call(static_rnn, None, ag__.ConversionOptions(recursive=True, verbose=0, strip_decorators=(ag__.convert, ag__.do_not_convert, ag__.converted_call), force_conversion=False, optional_features=ag__.Feature.ALL, internal_convert_us
View tf_omp_benchmark.py
===== Model =====
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
View foo.cpp
T* out = BaseType::getBuf() +
g * this->packedBufferSize(block.row_size, block.col_size);
for (int i = block.row_start; i < block.row_start + block.row_size; ++i) {
+ auto r = i;
+ int32_t block_row_id = r / BaseType::blockRowSize();
+ int32_t brow_offset = (block_row_id * BaseType::blockCols()) *
+ (BaseType::blockRowSize() * BaseType::blockColSize());
+
+ int32_t inblock_offset_row_cpt = (r % BaseType::blockRowSize() / row_interleave_) *
+ BaseType::blockColSize() * row_interleave_ + r % row_interleave_;
View gist:4beaaca6f6ae4c74a1f0faa60596483f
E0215 20:45:58.278607 2727955 ExceptionTracer.cpp:214] exception stack complete
terminate called after throwing an instance of 'std::runtime_error'
what():
index out of range at caffe2/aten/src/TH/generic/THTensorEvenMoreMath.cpp:214:
operation failed in interpreter:
prev_hypos_indices: Tensor,
num_steps: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
src_tokens0 = torch.t(src_tokens)
_0 = fork(self.__forked_function, src_tokens0, self.encoder_ens.model_0.encoder.embed_tokens.weight, src_lengths, getattr(self.encoder_ens.model_0.encoder.bilstm.layers, "0").weight_ih_l0, getattr(self.encoder_ens.model_0.encoder.bilstm.layers, "0").weight_hh_l0, getattr(self.encoder_ens.model_0.encoder.bilstm.layers, "0").bias_ih_l0, getattr(self.encoder_ens.model_0.encoder.bilstm.layers, "0").bias_hh_l0, getattr(self.encoder_ens.model_0.encoder.bilstm.layers, "0").weight_ih_l0_reverse, getattr(self.encoder_ens.model_0.encoder.bilstm.layers, "0").weight_hh_l0_reverse, getattr(self.encoder_ens.model_0.encoder.bilst
View dict_test.py
import torch
class DictTest(torch.jit.ScriptModule):
__constants__ = ['d']
def __init__(self, d):
super(DictTest, self).__init__()
self.d = d
def forward(self):
View live_code.py
import torch
class MyDecisionGate(torch.jit.ScriptModule):
    """TorchScript module that returns the input unchanged when its
    elementwise sum is positive, and the negated input otherwise.

    Uses the legacy ``ScriptModule`` + ``@torch.jit.script_method`` API,
    so ``forward`` is compiled by the TorchScript compiler at class
    definition time.
    """

    @torch.jit.script_method
    def forward(self, x):
        # bool() coerces the 0-dim tensor produced by the comparison into
        # a Python bool, which the TorchScript compiler requires (in the
        # torch version this was written for) to use it as an `if` condition.
        if bool(x.sum() > 0):
            return x
        else:
            return -x
View gist:9aee9be71a6dbb8f901e8714d8d1bd8b
* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
* frame #0: 0x00007fff62d4db48 libc++abi.dylib`__cxa_throw
frame #1: 0x0000000117f514a4 libtorch.1.dylib`torch::jit::getOperatorFor(node=0x000000011e3ca140) at operator.cpp:603
frame #2: 0x0000000117eff819 libtorch.1.dylib`torch::jit::Node::findSchema(this=0x000000011e3ca140) const at ir.cpp:743
frame #3: 0x00000001175655eb libtorch.1.dylib`torch::jit::Node::schema(this=0x000000011e3ca140) const at ir.h:570
frame #4: 0x0000000117fba7cb libtorch.1.dylib`torch::jit::AliasDb::analyze(this=0x00007ffeefbf90e8, node=0x000000011e3ca140) at alias_analysis.cpp:358
frame #5: 0x0000000117fba3e0 libtorch.1.dylib`torch::jit::AliasDb::analyze(this=0x00007ffeefbf90e8, block=0x000000012070a2e0) at alias_analysis.cpp:300
frame #6: 0x0000000117fa6358 libtorch.1.dylib`torch::jit::AliasDb::analyze(this=0x00007ffeefbf90e8, graph=std::__1::shared_ptr<torch::jit::Graph>::element_type @ 0x000000012071c660 strong=3 weak=1) at alias_an
View gist:19f4810d612f805e7ec527eae0c09f52
graph(%self : Float(1, 5, 3)
%1 : (Float(1, 3), Float(1, 3))
%4 : Float(12, 3)
%5 : Float(12, 3)
%6 : Float(12)
%7 : Float(12)) {
%hx.1 : Float(1, 3), %hx.2 : Float(1, 3) = prim::TupleUnpack(%1)
%8 : int = prim::Constant[value=1](), scope: LSTM
%9 : int = prim::Constant[value=1](), scope: LSTM
%10 : Tensor[] = aten::split(%self, %8, %9), scope: LSTM
You can’t perform that action at this time.
You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session.