[This file has been truncated.]
V0627 17:31:00.663000 139845268738432 torch/_logging/structured.py:19] {"str": ["/localdisk/leslie/torch_inductor_community/pytorch/benchmarks/dynamo/torchbench.py", 0]}
V0627 17:31:00.663000 139845268738432 torch/_logging/structured.py:19] {"str": ["/localdisk/leslie/torch_inductor_community/pytorch/benchmarks/dynamo/common.py", 1]}
V0627 17:31:00.663000 139845268738432 torch/_logging/structured.py:19] {"str": ["/localdisk/leslie/torch_inductor_community/pytorch/torch/_dynamo/eval_frame.py", 2]}
V0627 17:31:00.663000 139845268738432 torch/_logging/structured.py:19] {"str": ["/localdisk/leslie/torch_inductor_community/pytorch/torch/_dynamo/convert_frame.py", 3]}
V0627 17:31:00.663000 139845268738432 torch/_dynamo/convert_frame.py:802] {"dynamo_start": {"stack": [{"line": 456, "name": "<module>", "filename": 0}, {"line": 452, "name": "torchbench_main", "filename": 0}, {"line": 3661, "name": "main", "filename": 1}, {"line": 3593, "name": "process_entry", "filename": 1}, {"line": 4220, "name": "run", "filename":
import torch
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
[This file has been truncated.]
loading model: 0it [00:00, ?it/s]Input ids are automatically padded from 819 to 832 to be a multiple of `config.block_size`: 64
loading model: 0it [00:02, ?it/s]
cpu eval hf_BigBird
V0614 00:48:29.163000 140616046391680 torch/_inductor/freezing.py:118] [0/0_1] TRACED GRAPH
V0614 00:48:29.163000 140616046391680 torch/_inductor/freezing.py:118] [0/0_1] ===== FROZEN GRAPH =====
V0614 00:48:29.163000 140616046391680 torch/_inductor/freezing.py:118] [0/0_1] /localdisk/leslie/torch_inductor_community/pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module):
V0614 00:48:29.163000 140616046391680 torch/_inductor/freezing.py:118] [0/0_1]     def forward(self):
V0614 00:48:29.163000 140616046391680 torch/_inductor/freezing.py:118] [0/0_1]         return ()
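The frozen graph above has no inputs and returns an empty tuple: freezing folds parameters and buffers into compile-time constants, so everything this subgraph computed was constant-folded away. A minimal sketch of producing this kind of log (assumed workflow; the Linear model is a placeholder, not the hf_BigBird run above):

# Equivalent to running with TORCHINDUCTOR_FREEZING=1 TORCH_LOGS="+inductor".
import torch

torch._inductor.config.freezing = True

model = torch.nn.Linear(8, 8).eval()  # placeholder model, not hf_BigBird
x = torch.randn(2, 8)

with torch.no_grad():
    compiled = torch.compile(model)
    compiled(x)  # freezing.py logs the TRACED/FROZEN GRAPH on this first call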
# TORCHINDUCTOR_FREEZING=1 TORCH_LOGS="+output_code" numactl -C 56-111 -m 1 python test_softmax.py
import torch
import time
import random
import numpy as np
from torch._inductor import config as inductor_config
# inductor_config.cpp_wrapper = True
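# The rest of test_softmax.py is truncated here. A minimal sketch of a
# benchmark body consistent with the imports and command line above (the
# input shape, softmax dim, and iteration count are assumptions, not the
# original script):
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)

x = torch.randn(56, 1024, 1024)  # assumed input shape

def fn(t):
    return torch.softmax(t, dim=-1)

with torch.no_grad():
    compiled = torch.compile(fn)
    compiled(x)  # warm-up: triggers compilation and the +output_code dump
    start = time.time()
    for _ in range(100):
        compiled(x)
    print("avg latency (s):", (time.time() - start) / 100)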
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
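The lines above are the preamble of the Python wrapper code that Inductor generates. A hedged sketch of how such a dump is obtained (the function and shapes are placeholders):

# Run with TORCH_LOGS="+output_code" to print the generated wrapper,
# including headers like: # AOT ID: ['0_inference']
import torch

def fn(a, b):
    return (a + b).relu()

compiled = torch.compile(fn)
compiled(torch.randn(8, 8), torch.randn(8, 8))  # generated code is logged on first call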
model_ in compile_fx is: GraphModule(
  (L__mod___conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (L__mod___bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (L__mod___relu): ReLU(inplace=True)
  (L__mod___maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (getattr_L__mod___layer1___0___conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
  (getattr_L__mod___layer1___0___bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (getattr_L__mod___layer1___0___relu): ReLU(inplace=True)
  (getattr_L__mod___layer1___0___conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (getattr_L__mod___layer1___0___bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
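The GraphModule above (a ResNet-style prefix, truncated) is what Dynamo hands to the compile_fx backend. A hedged sketch of printing the same kind of dump from a custom backend (the resnet18 model choice is an assumption, picked only because its layer names match):

import torch
import torchvision  # assumed dependency, just to get a ResNet

def inspect_backend(gm: torch.fx.GraphModule, example_inputs):
    print("model_ in compile_fx is:", gm)  # same style of dump as above
    return gm.forward  # fall back to eager execution of the captured graph

model = torchvision.models.resnet18().eval()
compiled = torch.compile(model, backend=inspect_backend)
compiled(torch.randn(1, 3, 224, 224))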
import torch

def _unsqueeze_multiple(x, dimensions):
    # Insert a size-1 dimension at each requested position, in ascending order.
    for dim in sorted(dimensions):
        x = torch.unsqueeze(x, dim)
    return x

if __name__ == "__main__":
    input = torch.randn(2, 3, 4, 4)
    scales = torch.tensor([3, 3, 3])
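    # Hedged continuation (the original file is truncated here): broadcast the
    # per-channel scales along dim 1 so they line up with `input`.
    broadcast_scales = _unsqueeze_multiple(scales, (0, 2, 3))
    print(broadcast_scales.shape)             # torch.Size([1, 3, 1, 1])
    print((input * broadcast_scales).shape)   # torch.Size([2, 3, 4, 4])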