Chillee / 1-pw_op_fusion.py
PT 2.0 Benchmarks
import torch
import torch._inductor.config
import time

torch._inductor.config.triton.cudagraphs = False
torch.set_float32_matmul_precision('high')

def bench(f, name=None, iters=100, warmup=5, display=True, profile=False):
    for _ in range(warmup):
        f()
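    # NOTE: the scrape cuts the helper off after the warm-up loop.  Everything below
    # is an illustrative sketch, not the gist's own code: an assumed minimal timing
    # body for bench(), then an example of benchmarking pointwise-op fusion.
    torch.cuda.synchronize()
    begin = time.time()
    for _ in range(iters):
        f()
    torch.cuda.synchronize()
    if display:
        print(f"{name}: {(time.time() - begin) * 1e6 / iters:.2f} us / iter")

# Illustrative usage (assumed, not from the gist): a chain of pointwise ops that
# TorchInductor can fuse into a single Triton kernel, timed eagerly and compiled.
def pointwise_chain(x):
    return (x.cos() * x.sin() + 1).relu()

x = torch.randn(2**24, device='cuda')
compiled = torch.compile(pointwise_chain)
bench(lambda: pointwise_chain(x), name='eager')
bench(lambda: compiled(x), name='torch.compile')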

# (Separate gist fragment.)  Point Numba's CUDA backend at the system's libdevice
# directory and NVVM shared library before any kernels are compiled.
from timeit import default_timer as time
import numpy as np
from numba import cuda
import os
os.environ['NUMBAPRO_LIBDEVICE'] = '/usr/lib/nvidia-cuda-toolkit/libdevice/'
os.environ['NUMBAPRO_NVVM'] = '/usr/lib/x86_64-linux-gnu/libnvvm.so.3.1.0'
import numpy
import torch
import ctypes
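
# Illustrative sketch (not from the gist): with the paths above set, a minimal
# Numba CUDA kernel can be JIT-compiled and launched like this.
@cuda.jit
def add_one(x):
    i = cuda.grid(1)                     # global 1-D thread index
    if i < x.size:
        x[i] += 1.0

arr = numpy.zeros(1024, dtype=numpy.float32)
d_arr = cuda.to_device(arr)              # host -> device copy
add_one[(arr.size + 255) // 256, 256](d_arr)   # launch config: (blocks, threads per block)
print(d_arr.copy_to_host()[:4])          # -> [1. 1. 1. 1.]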

Kaixhin / lstms.py
Collection of LSTMs
# Collection of LSTM cells (including forget gates)
# https://en.wikipedia.org/w/index.php?title=Long_short-term_memory&oldid=784163987
import torch
from torch import nn
from torch.nn import Parameter
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from torch.autograd import Variable
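
# Illustrative sketch (not one of the gist's classes): the standard LSTM cell with
# a forget gate that the variants in this collection build on.
class BasicLSTMCell(nn.Module):
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.ih = nn.Linear(input_size, 4 * hidden_size)   # input -> 4 fused gates
        self.hh = nn.Linear(hidden_size, 4 * hidden_size)  # hidden -> 4 fused gates

    def forward(self, x, state):
        h, c = state
        i, f, g, o = (self.ih(x) + self.hh(h)).chunk(4, dim=-1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
        c = f * c + i * torch.tanh(g)    # forget old cell state, add new candidate
        h = o * torch.tanh(c)
        return h, (h, c)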

Using Torch in Atom

Install Atom from https://atom.io or with brew cask install atom

Then install packages with apm, Atom's package manager (for example, apm install hydrogen):

  • hydrogen
  • autocomplete-paths
  • language-lua
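
From a terminal, that is:

apm install hydrogen
apm install autocomplete-paths
apm install language-lua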