Van-Quang Nguyen (davidnvq)

@etendue
etendue / test.py
Created April 24, 2019 18:59
test script
import multiprocessing as mp
import ai2thor.controller
import numpy as np
import time
import os
actions = [
    'MoveAhead',
    'MoveBack',
    'MoveRight',
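A minimal single-process sketch of driving the simulator with these actions (2019-era ai2thor API; the scene name, grid size, and the completed actions list are assumptions, since the preview cuts the list off):

controller = ai2thor.controller.Controller()
controller.start()                                   # launches the Unity window
controller.reset('FloorPlan1')                       # illustrative scene
controller.step(dict(action='Initialize', gridSize=0.25))
for _ in range(10):
    event = controller.step(dict(action=np.random.choice(actions)))
    print(event.metadata['lastActionSuccess'])       # per-step success flag
controller.stop()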
@davidnvq
davidnvq / lr_adjust.py
Last active April 25, 2019 02:06
Step-decay learning rate example from the PyTorch ImageNet training script
# https://github.com/pytorch/examples/blob/master/imagenet/main.py#L392-L396
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

# Create a learning rate adjustment function that divides the learning rate by 10 every 30 epochs
def adjust_learning_rate(epoch):
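The second definition is cut off in the preview. For the same decay-by-10-every-30-epochs schedule, PyTorch's built-in StepLR scheduler is the idiomatic route; a minimal sketch (model and optimizer are placeholders):

from torch import nn, optim

model = nn.Linear(10, 2)                  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
for epoch in range(90):
    # ... one epoch of training, calling optimizer.step() per batch ...
    scheduler.step()                      # lr becomes 0.01 at epoch 30, 0.001 at 60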
@davidnvq
davidnvq / dynamic_rnn.py
Last active October 17, 2023 21:33
Define a Dynamic RNN with pack_padded_sequence and pad_packed_sequence
import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class DynamicRNN(nn.Module):
    """
    A wrapper around recurrent modules (RNN, LSTM)
    that supports packed sequence batches.
    """
@williamFalcon
williamFalcon / Pytorch_LSTM_variable_mini_batches.py
Last active April 24, 2024 17:53
Simple batched PyTorch LSTM
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
"""
Blog post:
Taming LSTMs: Variable-sized mini-batches and why PyTorch is good for your health:
https://medium.com/@_willfalcon/taming-lstms-variable-sized-mini-batches-and-why-pytorch-is-good-for-your-health-61d35642972e
"""
@jeasinema
jeasinema / weight_init.py
Last active May 25, 2023 09:32
A simple parameter-initialization script for PyTorch
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import torch
import torch.nn as nn
import torch.nn.init as init
def weight_init(m):
    '''
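The body is cut off in the preview; a minimal version in the same spirit (layer coverage and init choices here are illustrative, not necessarily the gist's):

def weight_init(m):
    '''Xavier-init weights and zero biases; extend with more layer types as needed.'''
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.xavier_normal_(m.weight)
        if m.bias is not None:
            init.zeros_(m.bias)
    elif isinstance(m, nn.BatchNorm2d):
        init.ones_(m.weight)
        init.zeros_(m.bias)

# usage: model.apply(weight_init) visits every submodule recursively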
@davidnvq
davidnvq / session.sh
Created March 6, 2018 14:16
Multiple terminal sessions with tmux and screen
# Using two packages: `tmux` (prefix Ctrl+B) and `screen` (prefix Ctrl+A).
# 1. Create a new session:
# tmux
tmux new -s session_name
# screen
screen -S session_name
# 2. Attach to an existing session:
# tmux
tmux attach -t session_name
# screen
screen -r session_name
@ihsgnef
ihsgnef / colorize_text.py
Last active May 22, 2024 09:56
Visualize attention over text with background colors
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def colorize(words, color_array):
    # words: list of words; color_array: scores in [0, 1], one per word
    cmap = matplotlib.cm.get_cmap('RdBu')
    template = '<span class="barcode" style="color: black; background-color: {}">{}</span>'
    colored_string = ''
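The loop that follows is cut off; a sketch of the continuation, mapping each score through the colormap to a hex background (it mirrors the standard pattern, not necessarily the gist verbatim):

    for word, score in zip(words, color_array):
        hex_color = matplotlib.colors.rgb2hex(cmap(score)[:3])
        colored_string += template.format(hex_color, '&nbsp;' + word + '&nbsp;')
    return colored_string   # render with IPython.display.HTML(colored_string)

Note that matplotlib.cm.get_cmap is deprecated since Matplotlib 3.7; matplotlib.colormaps['RdBu'] is the current spelling.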
@karpathy
karpathy / nes.py
Last active June 28, 2024 12:59
Natural Evolution Strategies (NES) toy example that optimizes a quadratic function
"""
A bare bones examples of optimizing a black-box function (f) using
Natural Evolution Strategies (NES), where the parameter distribution is a
gaussian of fixed standard deviation.
"""
import numpy as np
np.random.seed(0)
# the function we want to optimize
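What follows in the full gist is the textbook NES loop; a self-contained sketch of it (the target vector and hyperparameters are illustrative):

def f(w):
    # toy black-box reward: negative squared distance to a fixed target
    return -np.sum((w - np.array([0.5, 0.1, -0.3])) ** 2)

npop, sigma, alpha = 50, 0.1, 0.001       # population size, noise std, learning rate
w = np.random.randn(3)                    # random initial parameters
for i in range(300):
    N = np.random.randn(npop, 3)          # population of Gaussian perturbations
    R = np.array([f(w + sigma * N[j]) for j in range(npop)])
    A = (R - np.mean(R)) / np.std(R)      # standardized returns as weights
    w = w + alpha / (npop * sigma) * np.dot(N.T, A)   # NES gradient estimate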
@karpathy
karpathy / pg-pong.py
Created May 30, 2016 22:50
Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import pickle  # the original script targets Python 2 and used cPickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
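gamma drives the credit-assignment step: rewards are discounted backwards through each episode, resetting at game boundaries since Pong only pays out when a point ends. A sketch of that function in the spirit of the script:

def discount_rewards(r):
    """Discounted returns over a flat reward array, reset at each nonzero reward."""
    discounted_r = np.zeros_like(r, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(len(r))):
        if r[t] != 0:
            running_add = 0.0   # Pong-specific: a nonzero reward ends a point
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r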
@karpathy
karpathy / min-char-rnn.py
Last active July 16, 2024 17:48
Minimal character-level language model with a Vanilla Recurrent Neural Network, in Python/numpy
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np
# data I/O
data = open('input.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
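For context, the file continues by building the char/index maps the model samples over, plus its hyperparameters (values are the gist's defaults):

char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
# hyperparameters
hidden_size = 100    # size of hidden layer of neurons
seq_length = 25      # number of steps to unroll the RNN for
learning_rate = 1e-1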