@rain1024
rain1024 / pytorch_lightning_simplest_example.py
Last active June 14, 2024 23:45
Simplest PyTorch Lightning Example
import pytorch_lightning as pl
import numpy as np
import torch
from torch.nn import MSELoss
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
class SimpleDataset(Dataset):
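The preview cuts off at the dataset class. Below is a minimal sketch of how such a "simplest Lightning example" typically continues; the dataset contents, model, and trainer settings are illustrative, not the gist's exact code.

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from torch.nn import MSELoss
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset

class SimpleDataset(Dataset):
    """Toy regression data: y = 2x + 1 plus a little noise (illustrative)."""
    def __init__(self, n=1000):
        rng = np.random.default_rng(0)
        x = rng.random((n, 1), dtype=np.float32)
        y = (2 * x + 1 + 0.01 * rng.standard_normal((n, 1))).astype(np.float32)
        self.x, self.y = torch.from_numpy(x), torch.from_numpy(y)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

class SimpleModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(1, 1)
        self.criterion = MSELoss()

    def forward(self, x):
        return self.layer(x)

    def training_step(self, batch, batch_idx):
        # Lightning calls this per batch; returning the loss is enough
        x, y = batch
        return self.criterion(self(x), y)

    def configure_optimizers(self):
        return Adam(self.parameters(), lr=1e-3)

if __name__ == "__main__":
    loader = DataLoader(SimpleDataset(), batch_size=32, shuffle=True)
    trainer = pl.Trainer(max_epochs=5)
    trainer.fit(SimpleModel(), loader)

The point of the pattern is that the training loop, device placement, and checkpointing all live in the Trainer, so the module only defines the forward pass, the loss, and the optimizer.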
@SolClover
SolClover / Art027_Python_005.py
Created May 19, 2021 09:05
Plot Silhouette scores for GMM models
# Create empty list
S=[]
# Range of clusters to try (2 to 10)
K=range(2,11)
# Select data for clustering model
X = df_loc[['Latitude', 'Longitude']]
for k in K:
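The loop body is cut off in the preview. A hedged sketch of the rest, assuming scikit-learn's GaussianMixture and silhouette_score; the df_loc DataFrame here is a random stand-in for the location data used in the snippet.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from sklearn.mixture import GaussianMixture

# Stand-in for the df_loc DataFrame referenced in the snippet
rng = np.random.default_rng(0)
df_loc = pd.DataFrame(rng.random((300, 2)), columns=['Latitude', 'Longitude'])

S = []                    # silhouette score per k
K = range(2, 11)          # number of components to try
X = df_loc[['Latitude', 'Longitude']]

for k in K:
    # Fit a GMM with k components and assign each point to a component
    labels = GaussianMixture(n_components=k, random_state=0).fit_predict(X)
    # Higher silhouette score means better-separated clusters
    S.append(silhouette_score(X, labels))

plt.plot(K, S, marker='o')
plt.xlabel('Number of components (k)')
plt.ylabel('Silhouette score')
plt.show()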
class CustomLSTM(nn.Module):
    def __init__(self, input_sz, hidden_sz):
        super().__init__()
        self.input_sz = input_sz
        self.hidden_size = hidden_sz
        self.W = nn.Parameter(torch.Tensor(input_sz, hidden_sz * 4))
        self.U = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))
        self.bias = nn.Parameter(torch.Tensor(hidden_sz * 4))
        self.init_weights()
class NaiveCustomLSTM(nn.Module):
    def __init__(self, input_sz: int, hidden_sz: int):
        super().__init__()
        self.input_size = input_sz
        self.hidden_size = hidden_sz
        # i_t
        self.U_i = nn.Parameter(torch.Tensor(input_sz, hidden_sz))
        self.V_i = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz))
        self.b_i = nn.Parameter(torch.Tensor(hidden_sz))
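Both previews stop after the parameter definitions; the (U, V, b) triples they set up are the standard LSTM gate weights. A hedged, self-contained sketch of the forward pass those parameters imply, written in the per-gate layout of NaiveCustomLSTM; the initialization and the sequence loop are illustrative, not the gist's code.

import torch
import torch.nn as nn

class NaiveLSTMCell(nn.Module):
    """One LSTM step, spelled out gate by gate (illustrative)."""
    def __init__(self, input_sz: int, hidden_sz: int):
        super().__init__()
        self.hidden_size = hidden_sz
        def gate_params():
            # (U, V, b) triple for one gate; small random init for illustration
            return (nn.Parameter(torch.randn(input_sz, hidden_sz) * 0.1),
                    nn.Parameter(torch.randn(hidden_sz, hidden_sz) * 0.1),
                    nn.Parameter(torch.zeros(hidden_sz)))
        self.U_i, self.V_i, self.b_i = gate_params()   # input gate
        self.U_f, self.V_f, self.b_f = gate_params()   # forget gate
        self.U_g, self.V_g, self.b_g = gate_params()   # cell candidate
        self.U_o, self.V_o, self.b_o = gate_params()   # output gate

    def forward(self, x_t, h_t, c_t):
        # Standard LSTM gate equations
        i_t = torch.sigmoid(x_t @ self.U_i + h_t @ self.V_i + self.b_i)
        f_t = torch.sigmoid(x_t @ self.U_f + h_t @ self.V_f + self.b_f)
        g_t = torch.tanh(x_t @ self.U_g + h_t @ self.V_g + self.b_g)
        o_t = torch.sigmoid(x_t @ self.U_o + h_t @ self.V_o + self.b_o)
        c_t = f_t * c_t + i_t * g_t        # new cell state
        h_t = o_t * torch.tanh(c_t)        # new hidden state
        return h_t, c_t

# Usage: step the cell over a (batch, seq_len, input_sz) tensor
cell = NaiveLSTMCell(input_sz=10, hidden_sz=20)
x = torch.randn(4, 7, 10)
h = torch.zeros(4, 20)
c = torch.zeros(4, 20)
for t in range(x.size(1)):
    h, c = cell(x[:, t, :], h, c)
print(h.shape)  # torch.Size([4, 20])

The fused CustomLSTM variant above stacks the same four gates into single W, U, and bias tensors of width hidden_sz * 4 and slices them apart inside the forward pass.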
@OniDaito
OniDaito / pytorchvis.py
Created May 14, 2020 16:41
Given a PyTorch model, print its computation graph to the console and render it with graphviz to see what is going on
# Our drawing graph functions. We rely / have borrowed from the following
# python libraries:
# https://github.com/szagoruyko/pytorchviz/blob/master/torchviz/dot.py
# https://github.com/willmcgugan/rich
# https://graphviz.readthedocs.io/en/stable/
def draw_graph(start, watch=[]):
    from graphviz import Digraph
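The preview ends right after the graphviz import. A hedged sketch of the core idea, the torchviz-style traversal of a tensor's grad_fn graph; it covers only the graphviz half of the description, and the function body is illustrative rather than this gist's code.

import torch
from graphviz import Digraph

def draw_graph(start):
    """Walk the autograd graph behind `start` and render it with graphviz."""
    dot = Digraph(comment='autograd graph')
    seen = set()

    def add(fn):
        if fn is None or fn in seen:
            return
        seen.add(fn)
        dot.node(str(id(fn)), type(fn).__name__)
        # next_functions holds the upstream grad_fn nodes feeding this one
        for next_fn, _ in fn.next_functions:
            if next_fn is not None:
                add(next_fn)
                dot.edge(str(id(next_fn)), str(id(fn)))

    add(start.grad_fn)
    return dot

# Usage: build a tiny graph and write graph.png (requires the graphviz binary)
x = torch.randn(3, requires_grad=True)
y = (x * 2).sum()
draw_graph(y).render('graph', format='png')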
@espoirMur
espoirMur / install_nvidia_driver.md
Last active April 1, 2024 19:22
How I fixed the issue "NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running."

I am no longer able to monitor this post, so I have moved everything to my personal blog for better monitoring.

Please click here to access the full post

class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        resnet = models.resnet50(pretrained=True)
        for param in resnet.parameters():
            param.requires_grad_(False)
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)
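The preview shows only the constructor of this image-captioning encoder. A hedged sketch of the matching forward pass; the flatten-then-project step is an assumption about how the gist continues, and the usage lines are illustrative.

import torch
import torch.nn as nn
from torchvision import models

class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        # Newer torchvision prefers the weights= argument over pretrained=True
        resnet = models.resnet50(pretrained=True)
        for param in resnet.parameters():
            param.requires_grad_(False)          # freeze the backbone
        modules = list(resnet.children())[:-1]   # drop the final fc layer
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)

    def forward(self, images):
        # Assumed continuation: pool, flatten, project to the embedding size
        features = self.resnet(images)                   # (B, 2048, 1, 1)
        features = features.view(features.size(0), -1)   # (B, 2048)
        return self.embed(features)                      # (B, embed_size)

# Usage: a batch of 224x224 RGB images -> 256-dim features
encoder = EncoderCNN(embed_size=256)
print(encoder(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 256])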
@timothymugayi
timothymugayi / tqdm_threadpool.py
Created December 6, 2019 15:37
How to run tqdm in multiple threads
import time
from random import randrange
from multiprocessing.pool import ThreadPool
from tqdm import tqdm
def func_call(position, total):
    text = 'progressbar #{position}'.format(position=position)
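The preview stops inside func_call. A hedged sketch of the pattern the title describes: each worker thread gets its own bar via tqdm's position argument, and a ThreadPool fans out the calls. The sleep times and bar counts are made up.

import time
from multiprocessing.pool import ThreadPool
from random import randrange

from tqdm import tqdm

def func_call(position, total):
    text = 'progressbar #{position}'.format(position=position)
    # position pins each bar to its own terminal row so threads don't overwrite each other
    with tqdm(total=total, position=position, desc=text) as progress:
        for _ in range(total):
            time.sleep(randrange(1, 3) / 10)
            progress.update(1)

if __name__ == '__main__':
    pool = ThreadPool(4)
    pool.starmap(func_call, [(pos, 50) for pos in range(4)])
    pool.close()
    pool.join()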
@leopd
leopd / almost-attention.py
Last active April 21, 2023 22:35
Explanatory (non-vectorized) code for how attention works
# This code doesn't work, and isn't intended to.
# The goal of this code is to explain how attention mechanisms work, in code.
# It is deliberately not vectorized to make it clearer.
def attention(self, X_in: List[Tensor]):
    # For every token, transform the previous layer's output
    for i in range(self.sequence_length):
        query[i] = self.Q * X_in[i]
        key[i] = self.K * X_in[i]
        value[i] = self.V * X_in[i]
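The loop above computes a query, key, and value per token; the part the preview omits is the score/softmax/weighted-sum step. A hedged, runnable version of the same idea using plain tensors; the shapes and the sqrt(d) scaling follow the standard scaled dot-product formulation, not necessarily this gist's continuation.

import math
import torch

def attention(X_in, Q, K, V):
    """Non-vectorized single-head attention over a list of token vectors."""
    seq_len = len(X_in)
    query = [Q @ x for x in X_in]
    key = [K @ x for x in X_in]
    value = [V @ x for x in X_in]

    out = []
    for i in range(seq_len):
        # Score token i's query against every key, then softmax the scores
        scores = torch.stack([query[i] @ key[j] / math.sqrt(len(key[j]))
                              for j in range(seq_len)])
        weights = torch.softmax(scores, dim=0)
        # Token i's output is the attention-weighted sum of the values
        out.append(sum(weights[j] * value[j] for j in range(seq_len)))
    return out

# Usage: 5 tokens of dimension 8
d = 8
X = [torch.randn(d) for _ in range(5)]
Q, K, V = (torch.randn(d, d) for _ in range(3))
out = attention(X, Q, K, V)
print(len(out), out[0].shape)  # 5 torch.Size([8])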
@VictorSanh
VictorSanh / kd.py
Last active July 3, 2024 07:12
Knowledge Distillation
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
KD_loss = nn.KLDivLoss(reduction='batchmean')
def kd_step(teacher: nn.Module,
            student: nn.Module,
            temperature: float,
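The signature and body of kd_step are cut off in the preview. A hedged sketch of a distillation step consistent with the batchmean KLDivLoss defined above; the remaining parameters (inputs, optimizer) and the training wiring are assumptions about how the gist continues.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer

KD_loss = nn.KLDivLoss(reduction='batchmean')

def kd_step(teacher: nn.Module,
            student: nn.Module,
            temperature: float,
            inputs: torch.Tensor,      # assumed remaining parameters
            optimizer: Optimizer) -> float:
    teacher.eval()
    student.train()

    with torch.no_grad():
        logits_t = teacher(inputs)     # teacher targets, no gradient
    logits_s = student(inputs)

    # Soften both distributions with the temperature; KLDivLoss expects
    # log-probabilities for the student and probabilities for the teacher.
    loss = KD_loss(F.log_softmax(logits_s / temperature, dim=-1),
                   F.softmax(logits_t / temperature, dim=-1))
    # The T^2 factor keeps gradient magnitudes comparable across temperatures
    loss = loss * temperature ** 2

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()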