Ligeng Zhu (Lyken17)

Lyken17 / relay_pass_example.py
Created August 15, 2022 17:05
relay_pass_replace_add-div_to_sub-sub
import numpy as np
from collections import Counter
import tvm
from tvm import relay
from tvm.relay import ExprFunctor, ExprMutator, ExprVisitor
from tvm.relay.expr_functor import ExprMutator, Call
from tvm.relay.dataflow_pattern import wildcard, is_op, is_constant, is_expr, rewrite, DFPatternCallback
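The preview above cuts off at the imports. Based on the gist title, a minimal sketch of such a dataflow-pattern rewrite might look like the following; the exact add/div-to-sub/sub mapping and the toy shapes are assumptions, not taken from the gist.

import tvm
from tvm import relay
from tvm.relay.dataflow_pattern import wildcard, is_op, rewrite, DFPatternCallback

class AddDivToSubSub(DFPatternCallback):
    # Match divide(add(x, y), z) and rewrite it to subtract(subtract(x, y), z).
    # Assumption: this is the mapping the title "add-div_to_sub-sub" refers to.
    def __init__(self):
        super().__init__()
        self.x, self.y, self.z = wildcard(), wildcard(), wildcard()
        self.pattern = is_op("divide")(is_op("add")(self.x, self.y), self.z)

    def callback(self, pre, post, node_map):
        x, y, z = (node_map[p][0] for p in (self.x, self.y, self.z))
        return relay.subtract(relay.subtract(x, y), z)

x, y, z = (relay.var(n, shape=(1, 8)) for n in "xyz")
expr = relay.divide(relay.add(x, y), z)
print(rewrite(AddDivToSubSub(), expr))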
Lyken17 / relay_ast_example.py
Created August 15, 2022 17:05
relay_ast_replace_div2mul
import numpy as np
from collections import Counter
import tvm
from tvm import relay
# from tvm.relay import ExprFunctor, ExprMutator, ExprVisitor
from tvm.relay.expr_functor import ExprMutator, Call
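Again only the imports survive in the preview. A sketch of a div-to-mul rewrite written directly against the AST with ExprMutator could look like this; restricting it to constant denominators is my assumption, not something visible in the gist.

import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator

class DivToMul(ExprMutator):
    # Replace x / c with x * (1 / c) when the denominator c is a Relay constant.
    def visit_call(self, call):
        new_args = [self.visit(a) for a in call.args]
        if isinstance(call.op, tvm.ir.Op) and call.op.name == "divide" \
                and isinstance(new_args[1], relay.Constant):
            inv = relay.const(1.0 / new_args[1].data.numpy(), dtype="float32")
            return relay.multiply(new_args[0], inv)
        return relay.Call(call.op, new_args, call.attrs, call.type_args)

x = relay.var("x", shape=(4,))
print(DivToMul().visit(relay.divide(x, relay.const(2.0))))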
Lyken17 / snpe_profile.py
Last active August 14, 2022 11:38
SNPE Profile
import torch
import torch.nn as nn
import torchvision
from torchvision import models
batch = 1
dim = 3
res = 224
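The preview stops after the shape constants. A plausible continuation is to trace a torchvision model at that 1x3x224x224 input and export it to ONNX for SNPE's snpe-onnx-to-dlc converter; the model choice and output file name below are assumptions.

import torch
from torchvision import models

batch, dim, res = 1, 3, 224

# Assumed continuation: export a model at the resolution above for SNPE profiling.
model = models.mobilenet_v2(pretrained=True).eval()
dummy = torch.randn(batch, dim, res, res)
torch.onnx.export(model, dummy, "mobilenet_v2.onnx",
                  input_names=["input"], output_names=["output"], opset_version=11)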
Lyken17 / relay_replace_op.py
Created August 5, 2022 07:03
Replace OPs in relay pass
import numpy as np
from collections import Counter
import tvm
from tvm import relay
from tvm.relay import ExprFunctor, ExprMutator, ExprVisitor
from tvm.relay.expr_functor import ExprMutator, Call
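Only the imports are shown here as well. For reference, a minimal sketch of how an op-replacing ExprMutator is typically wrapped into a Relay function pass; the concrete substitution (nn.relu to nn.leaky_relu) is purely illustrative and not taken from the gist.

import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator

@relay.transform.function_pass(opt_level=0)
class ReplaceOpPass:
    # Rewrite every nn.relu call into nn.leaky_relu (assumed example mapping).
    def transform_function(self, func, mod, ctx):
        class Replacer(ExprMutator):
            def visit_call(self, call):
                new_args = [self.visit(a) for a in call.args]
                if isinstance(call.op, tvm.ir.Op) and call.op.name == "nn.relu":
                    return relay.nn.leaky_relu(new_args[0], alpha=0.1)
                return relay.Call(call.op, new_args, call.attrs, call.type_args)
        return Replacer().visit(func)

x = relay.var("x", shape=(1, 16))
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
print(ReplaceOpPass()(mod))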
import os, sys
import os.path as osp
import math
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms, datasets
from ofa.model_zoo import ofa_net
# dense update
# forward
# input:  1, 48, 8, 8
# weight: 240, 48, 1, 1
# output: 1, 240, 8, 8
# input
# (n, c, h, w) => (1, n * c, h, w)
input_1 = 1, 48, 8, 8
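The shape notes above describe a 1x1 dense-update convolution and a batch-into-channel reshape. A quick sanity check of those shapes, under my interpretation:

import torch
import torch.nn.functional as F

# 1x1 conv with the listed shapes: (1, 48, 8, 8) conv (240, 48, 1, 1) -> (1, 240, 8, 8)
x = torch.randn(1, 48, 8, 8)
w = torch.randn(240, 48, 1, 1)
print(F.conv2d(x, w).shape)  # torch.Size([1, 240, 8, 8])

# Assumption: the (n, c, h, w) => (1, n*c, h, w) line folds the batch into the
# channel dimension, e.g. to run per-sample computation as a single grouped conv.
n, c, h, w_ = 2, 48, 8, 8
folded = torch.randn(n, c, h, w_).reshape(1, n * c, h, w_)
print(folded.shape)  # torch.Size([1, 96, 8, 8])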
import os, os.path as osp
import json
from copy import deepcopy
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
Lyken17 / sub-c-poc.py
Created May 4, 2022 13:13
training information
from tokenize import group
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
import warnings
from torch.nn.grad import _grad_input_padding
@torch.no_grad()
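Only the imports and a @torch.no_grad() decorator are visible here. Given the torch.nn.grad import, the proof of concept presumably recomputes convolution gradients by hand, which is what a sub-channel ("sub-c") sparse-update experiment needs; the sketch below uses the public conv2d_input / conv2d_weight helpers instead of the private _grad_input_padding, and everything past the imports is my guess.

import torch
import torch.nn.functional as F
from torch.nn.grad import conv2d_input, conv2d_weight

@torch.no_grad()
def manual_conv_backward(x, w, grad_out, stride=1, padding=0):
    # Recompute conv2d gradients explicitly; a sub-channel update could then keep
    # only a slice of grad_w. (Assumed intent, not shown in the preview.)
    grad_x = conv2d_input(x.shape, w, grad_out, stride=stride, padding=padding)
    grad_w = conv2d_weight(x, w.shape, grad_out, stride=stride, padding=padding)
    return grad_x, grad_w

x = torch.randn(1, 8, 16, 16, requires_grad=True)
w = torch.randn(4, 8, 3, 3, requires_grad=True)
y = F.conv2d(x, w, padding=1)
y.sum().backward()
gx, gw = manual_conv_backward(x.detach(), w.detach(), torch.ones_like(y), padding=1)
print(torch.allclose(gx, x.grad, atol=1e-5), torch.allclose(gw, w.grad, atol=1e-5))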
Lyken17 / setup.sh
Created February 27, 2019 00:18
CUDA10+CUDNN5.1-SETUP
# tested on AWS p2.xlarge August 29, 2018
# install CUDA
sudo apt-get update && sudo apt-get install wget -y --no-install-recommends
CUDA_URL="https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda-repo-ubuntu1804-10-1-local-10.1.105-418.39_1.0-1_amd64.deb"
wget -c ${CUDA_URL} -O cuda.deb
sudo dpkg --install cuda.deb
sudo apt-key add /var/cuda-repo-10-1/7fa2af80.pub
sudo apt-get update
sudo apt-get install -y cuda
Lyken17 / quantize.py
Created April 10, 2022 05:35
quick-quantize-impl
import torch
import torch.nn as nn
import torch.nn.functional as F
def serialize(raw_idx):
raw_idx = raw_idx.clone()
# put 3 int10 into one int32
d = raw_idx.view(-1, 3)
d[:, 0] = d[:, 0] << 20
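The preview ends mid-function, right after the first shift. A guess at the rest of the packing scheme (three int10 values into bits [20:30), [10:20) and [0:10) of one int32) follows; the real gist may order or combine the fields differently.

import torch

def serialize_sketch(raw_idx):
    # Pack three int10 values per row into one int32 (assumed field layout).
    d = raw_idx.clone().view(-1, 3).to(torch.int32)
    return (d[:, 0] << 20) | (d[:, 1] << 10) | d[:, 2]

def deserialize_sketch(packed):
    mask = (1 << 10) - 1
    return torch.stack([(packed >> 20) & mask, (packed >> 10) & mask, packed & mask], dim=1)

idx = torch.randint(0, 1 << 10, (6,), dtype=torch.int32)
assert torch.equal(deserialize_sketch(serialize_sketch(idx)).view(-1), idx)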