Abinash Mohanty (@amohant4)

import tensorflow as tf

learning_rate = tf.placeholder(tf.float32, shape=[])  # Fed with the current learning rate every step
loss = …
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

loss_over_last_N_iters = []  # Keep track of loss in the last N iterations
lr = 0.01                    # Initial learning rate; can be anything
for global_step in range(0, total_steps):
    change_in_loss = get_loss_change(loss_over_last_N_iters)  # Determine if the loss is changing or has hit a plateau
    if change_in_loss > theta:
        lr = lr * alpha      # Change the learning rate (e.g. make it lr/10)
    # …
    sess.run(train_step, feed_dict={learning_rate: lr})  # Feed the (possibly decayed) rate into the graph
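get_loss_change is left undefined in the snippet above; a minimal sketch of one way to write it (the window length N and the mean-difference definition are assumptions, not part of the gist):

def get_loss_change(losses, N=100):
    """Absolute change between the mean loss of the older and newer
    halves of the last-N window; returns 0.0 until enough history exists."""
    if len(losses) < N:
        return 0.0
    window = losses[-N:]
    old_half, new_half = window[:N // 2], window[N // 2:]
    return abs(sum(new_half) / len(new_half) - sum(old_half) / len(old_half))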
global_step = tf.Variable(0, trainable=False)  # Variable to store the number of iterations
starter_learning_rate = 0.1                    # Initial learning rate
learning_rate = tf.train.exponential_decay(
    starter_learning_rate, global_step,        # Function applied by TF on the variable (same formula as shown above)
    100000, 0.96, staircase=True)              # staircase=True forces an integer division and thus a step decay
# Passing global_step to minimize() will increment it at each step.
learning_step = (
    tf.train.GradientDescentOptimizer(learning_rate)   # The optimizer reads the decayed learning rate at each step
    .minimize(...my loss..., global_step=global_step)  # global_step (# iterations) is updated by minimize()
)
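For reference, the schedule tf.train.exponential_decay computes is (from the TF 1.x documentation, restated in plain Python with the argument values used above):

decayed_lr = starter_learning_rate * 0.96 ** (global_step / 100000)   # decay_rate ** (step / decay_steps)
# With staircase=True the exponent uses integer division, so the rate
# drops in discrete steps every 100000 iterations instead of smoothly:
decayed_lr = starter_learning_rate * 0.96 ** (global_step // 100000)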
@amohant4
amohant4 / mxnet_gluon_lenet.py
Last active May 8, 2021 18:03
Example usage of gluon in MXNet, with LeNet on MNIST as a test case
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
from mxnet import init, gluon
import time
def create_lenet_using_sequential():
    """
    Method to return a LeNet using nn.Sequential from
    MXNet. nn.Sequential is a subclass of nn.Block.
    """
    net = nn.Sequential()
    net.add(nn.Conv2D(channels=6, kernel_size=5, activation='relu'),
            nn.MaxPool2D(pool_size=2, strides=2),
            nn.Conv2D(channels=16, kernel_size=3, activation='relu'),
            nn.MaxPool2D(pool_size=2, strides=2),
            nn.Dense(120, activation='relu'), nn.Dense(84, activation='relu'),
            nn.Dense(10))  # 10 output classes for MNIST
    return net
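The remaining imports (datasets, transforms, init, gluon, time) point at the MNIST test case mentioned in the description; a minimal sketch of what such a test might look like (the function name, batch size, and hyperparameters are assumptions, not the gist's exact code):

def test_lenet_mnist():
    transformer = transforms.Compose([transforms.ToTensor()])
    train_data = gluon.data.DataLoader(
        datasets.MNIST(train=True).transform_first(transformer),
        batch_size=256, shuffle=True)
    net = create_lenet_using_sequential()
    net.initialize(init=init.Xavier())
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
    for epoch in range(5):
        tic = time.time()
        for data, label in train_data:
            with mx.autograd.record():
                loss = loss_fn(net(data), label)
            loss.backward()
            trainer.step(batch_size=data.shape[0])
        print('epoch %d done in %.1f s' % (epoch, time.time() - tic))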
@amohant4
amohant4 / mxnet_gluon_lenet_nnBlock.py
Created July 17, 2019 06:01
Example of creating a network using the nn.Block class
import mxnet as mx
from mxnet.gluon import nn

class myLeNet(nn.Block):
    def __init__(self, **kwargs):
        super(myLeNet, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(channels=6, kernel_size=5, activation='relu')
        self.pool1 = nn.MaxPool2D(pool_size=2, strides=2)
        self.conv2 = nn.Conv2D(channels=16, kernel_size=3, activation='relu')
        self.pool2 = nn.MaxPool2D(pool_size=2, strides=2)
        self.fc1 = nn.Dense(120, activation='relu')
        self.fc2 = nn.Dense(84, activation='relu')

    def forward(self, x):
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        return self.fc2(self.fc1(x))  # Dense layers flatten their input automatically

lenet = myLeNet()
lenet.initialize()
x = mx.nd.random.uniform(shape=(1, 1, 28, 28))  # Dummy MNIST-sized input
lenet(x)
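Because gluon infers shapes lazily, the layer parameters are only allocated on this first forward call; once lenet(x) has run, they can be inspected:

print(lenet.conv1.weight.shape)  # -> (6, 1, 5, 5) once a 1-channel input has passed through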
import mxnet as mx
from mxnet import init

# Create an MXNet symbol for the graph
def create_net_moduleAPI():
    """
    Method to create a symbol for LeNet in MXNet.
    Arguments: None
    Returns: mx.sym for LeNet
    """
    data = mx.sym.Variable('data')
    c1 = mx.sym.Convolution(data, kernel=(5, 5), num_filter=6)
    p1 = mx.sym.Pooling(mx.sym.Activation(c1, act_type='relu'), pool_type='max', kernel=(2, 2), stride=(2, 2))
    c2 = mx.sym.Convolution(p1, kernel=(3, 3), num_filter=16)
    p2 = mx.sym.Pooling(mx.sym.Activation(c2, act_type='relu'), pool_type='max', kernel=(2, 2), stride=(2, 2))
    f1 = mx.sym.FullyConnected(mx.sym.Flatten(p2), num_hidden=120)
    f2 = mx.sym.FullyConnected(mx.sym.Activation(f1, act_type='relu'), num_hidden=84)
    return mx.sym.SoftmaxOutput(mx.sym.FullyConnected(mx.sym.Activation(f2, act_type='relu'), num_hidden=10), name='softmax')
@amohant4
amohant4 / octave_conv_pytorch.py
Last active May 11, 2021 02:54
Implementation of octave convolution in PyTorch
import torch.nn as nn

class OctConv(nn.Module):
    def __init__(self, ch_in, ch_out, kernel_size, stride=1, alphas=(0.5, 0.5)):
        super(OctConv, self).__init__()
        # Get layer parameters
        self.alpha_in, self.alpha_out = alphas
        assert 0 <= self.alpha_in <= 1 and 0 <= self.alpha_out <= 1, \
            "Alphas must be in interval [0, 1]"
        self.kernel_size = kernel_size
        self.stride = stride
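The gist is truncated at this point. As a rough illustration of where the constructor is heading, here is a minimal sketch following the octave-convolution paper's four-branch formulation; the class name, the hh/hl/lh/ll layers, and the forward pass are assumptions, not the gist's code:

import torch.nn.functional as F

class OctConvSketch(nn.Module):
    """Hypothetical completion: four conv paths between the high- and
    low-frequency feature maps (assumes stride=1 so the branches align)."""
    def __init__(self, ch_in, ch_out, kernel_size, stride=1, alphas=(0.5, 0.5)):
        super(OctConvSketch, self).__init__()
        alpha_in, alpha_out = alphas
        lf_in, lf_out = int(ch_in * alpha_in), int(ch_out * alpha_out)  # Low-frequency channel counts
        hf_in, hf_out = ch_in - lf_in, ch_out - lf_out                  # High-frequency channel counts
        pad = kernel_size // 2
        self.hh = nn.Conv2d(hf_in, hf_out, kernel_size, stride, pad)  # high -> high
        self.hl = nn.Conv2d(hf_in, lf_out, kernel_size, stride, pad)  # high -> low
        self.lh = nn.Conv2d(lf_in, hf_out, kernel_size, stride, pad)  # low  -> high
        self.ll = nn.Conv2d(lf_in, lf_out, kernel_size, stride, pad)  # low  -> low

    def forward(self, x_h, x_l):
        # High-frequency output: H->H plus upsampled L->H
        y_h = self.hh(x_h) + F.interpolate(self.lh(x_l), scale_factor=2, mode='nearest')
        # Low-frequency output (half resolution): L->L plus pooled H->L
        y_l = self.ll(x_l) + self.hl(F.avg_pool2d(x_h, 2))
        return y_h, y_l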