===== Compiled autograd graph =====
<eval_with_key>.53 class CompiledAutograd(torch.nn.Module):
    def forward(self, inputs, sizes, hooks):
        # No stacktrace found for following nodes
        getitem: "f32[]" = inputs[0]
        getitem_1: "f32[s0]" = inputs[1]; inputs = None
        getitem_2: "Sym(s1)" = sizes[0]
        getitem_3: "Sym(s1)" = sizes[1]
        getitem_4: "Sym(s3)" = sizes[2]
        getitem_5: "Sym(s4)" = sizes[3]
        getitem_6: "Sym(s4)" = sizes[4]; sizes = None
        expand: "f32[s1]" = torch.ops.aten.expand.default(getitem, [getitem_2]); getitem = getitem_2 = None
        getitem_7 = hooks[0]; hooks = None
        call_backward = torch__dynamo_external_utils_call_backward(getitem_7, (getitem_1, getitem_1), expand); getitem_7 = expand = None
        getitem_8: "f32[s4]" = call_backward[0]; call_backward = None
        accumulate_grad_ = torch.ops.inductor.accumulate_grad_.default(getitem_1, getitem_8); getitem_1 = getitem_8 = None
        return []
...
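For reference, the call_backward node above re-enters a user-defined torch.autograd.Function. Judging from the saved-tensor pair (getitem_1, getitem_1) and the backward source quoted in the Dynamo graph below (return gO * torch.cos(x1) * torch.cos(x2)), the producing code was along these lines. This is a hypothetical reconstruction (the class and variable names are assumptions), not necessarily the exact test from test_compiled_autograd.py:

import torch

class MyFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        # Saving the same tensor twice matches the (getitem_1, getitem_1)
        # pair passed to call_backward in the graph above.
        ctx.save_for_backward(x, x)
        return torch.sin(x)

    @staticmethod
    def backward(ctx, gO):
        x1, x2 = ctx.saved_tensors  # both alias the same input tensor
        return gO * torch.cos(x1) * torch.cos(x2)
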
# Dynamo graph
===== __compiled_fn_3 =====
<eval_with_key>.54 class GraphModule(torch.nn.Module):
    def forward(self, L_inputs_0_ : torch.Tensor, s0 : torch.SymInt, L_inputs_1_ : torch.Tensor, L_sizes_0_ : torch.SymInt):
        getitem = L_inputs_0_
        x2 = L_inputs_1_
        l_sizes_0_ = L_sizes_0_

        # File: <eval_with_key>.53:12, code: expand = torch.ops.aten.expand.default(getitem, [getitem_2]); getitem = getitem_2 = None
        expand = torch.ops.aten.expand.default(getitem, [l_sizes_0_]); getitem = l_sizes_0_ = None

        # File: /data/users/xmfan/core/pytorch/test/inductor/test_compiled_autograd.py:435, code: return gO * torch.cos(x1) * torch.cos(x2)
        cos = torch.cos(x2)
        mul = expand * cos; expand = cos = None
        cos_1 = torch.cos(x2)
        getitem_8 = mul * cos_1; mul = cos_1 = None

        # File: <eval_with_key>.53:16, code: accumulate_grad_ = torch.ops.inductor.accumulate_grad_.default(getitem_1, getitem_8); getitem_1 = getitem_8 = None
        accumulate_grad__default = torch.ops.inductor.accumulate_grad_.default(x2, getitem_8); x2 = getitem_8 = None
        return ()
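
Graphs like the two above are produced by running the backward pass under compiled autograd, which first captures the autograd graph as the CompiledAutograd module and then hands it to a compiler, where Dynamo traces it into a GraphModule like __compiled_fn_3. A minimal driver sketch, reusing the hypothetical MyFn from above (the backend choice and tensor shape are assumptions; the graph dumps correspond to compiled autograd's logging, whose exact switch varies by build):

import torch
from torch._dynamo import compiled_autograd

def compiler_fn(gm):
    # Compile the captured CompiledAutograd module; Dynamo traces it
    # into a GraphModule like the one shown above.
    return torch.compile(gm, backend="inductor", dynamic=True)

x = torch.randn(8, requires_grad=True)  # concrete shape is an assumption; sizes are symbolic (s0, s1, ...) above
loss = MyFn.apply(x).sum()

with compiled_autograd.enable(compiler_fn):
    loss.backward()  # backward graph is captured, compiled, and executed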