
@aurotripathy
aurotripathy / SSD_300x300.txt
Last active November 29, 2016 02:07
SSD train net
name: "spacenet_SSD_300x300_train"
layer {
  name: "data"
  type: "AnnotatedData"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
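    # NOTE: the preview truncates here; the rest of transform_param below is
    # an assumed completion using the stock SSD 300x300 training values
    # (mean subtraction and warp-resize) from the reference SSD models.
    mirror: true
    mean_value: 104.0
    mean_value: 117.0
    mean_value: 123.0
    resize_param {
      prob: 1.0
      resize_mode: WARP
      height: 300
      width: 300
    }
  }
  # (data_param and annotated_data_param follow in the full prototxt)
}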
"""
Time the all_reduce_sum operation with model parameters size of the VGG-16 model (~138M floats)
1. Paramters are loaded into each of the N=4 GPUs
2. nccl.all_reduce is invoked on the paramters
TO get a breakdown of the VGG model size, see...
https://stackoverflow.com/questions/28232235/how-to-calculate-the-number-of-parameters-of-convolutional-neural-networks
"""
from __future__ import print_function
import torch
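# The gist preview ends at the imports; below is a minimal sketch of the
# timing loop the docstring describes. The GPU count and tensor size come
# from the docstring; the iteration count is an assumption, not the gist's
# actual code.
import time
import torch.cuda.nccl as nccl

NUM_GPUS = 4                     # N=4, per the docstring
NUM_FLOATS = 138 * 1000 * 1000   # ~138M floats, the VGG-16 parameter count
ITERATIONS = 10                  # assumed; average over a few runs

# 1. Load a parameter-sized tensor onto each of the N GPUs.
tensors = [torch.ones(NUM_FLOATS, device='cuda:{}'.format(i))
           for i in range(NUM_GPUS)]

# 2. Invoke nccl.all_reduce (sum) on the tensors and time it.
for i in range(NUM_GPUS):
    torch.cuda.synchronize(i)
start = time.time()
for _ in range(ITERATIONS):
    nccl.all_reduce(tensors)  # in-place sum across all GPUs
for i in range(NUM_GPUS):
    torch.cuda.synchronize(i)
elapsed = (time.time() - start) / ITERATIONS
print('all_reduce of {:,} floats across {} GPUs: {:.4f} s'.format(
    NUM_FLOATS, NUM_GPUS, elapsed))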
@aurotripathy
aurotripathy / plot_vals.py
Last active December 3, 2019 23:17
plot of Top1/Top5 validation accuracy for ImageNet training
import json
from pudb import set_trace
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
with open('good_run_raport.json') as json_file:
    data = json.load(json_file)
print("top 5")
@aurotripathy
aurotripathy / imagenet-validation.py
Created December 3, 2019 22:53
This script does a quick-and-dirty validation of the ImageNet dataset.
""
This script does a quick-and-dirty validation of the ImageNet dataset.
TODO - a quick EDA (exploratory data analysis), which is customary before any analysis
"""
import os
import glob
import imghdr
import argparse
nb_classes = 1000
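# The preview truncates at nb_classes; below is a minimal sketch of the
# quick-and-dirty check the docstring describes. The one-folder-per-class
# layout and the --data-dir flag are assumptions, not the gist's code.
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', required=True,
                    help='root of the ImageNet train folder')
args = parser.parse_args()

class_dirs = sorted(glob.glob(os.path.join(args.data_dir, '*')))
assert len(class_dirs) == nb_classes, \
    'expected {} classes, found {}'.format(nb_classes, len(class_dirs))

# Flag any file that imghdr cannot identify as a JPEG (a few ImageNet
# files are PNGs with a .JPEG extension).
for class_dir in class_dirs:
    for image_path in glob.glob(os.path.join(class_dir, '*.JPEG')):
        if imghdr.what(image_path) != 'jpeg':
            print('suspect file:', image_path)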
""" Demonstrates the easy of integration of a custom layer """
import math
import torch
import torch.nn as nn
import numpy as np
class MyLinearLayer(nn.Module):
""" Custom Linear layer but mimics a standard linear layer """
def __init__(self, size_in, size_out):
super().__init__()
""" Demonstrates the easy of integration of a custom layer """
import math
import torch
import torch.nn as nn
import numpy as np
class MyLinearLayer(nn.Module):
""" Custom Linear layer but mimics a standard linear layer """
def __init__(self, size_in, size_out):
super().__init__()
self.size_in, self.size_out = size_in, size_out
weights = torch.Tensor(size_out, size_in)
self.weights = nn.Parameter(weights) # nn.Parameter is a Tensor that's a module parameter.
bias = torch.Tensor(size_out)
self.bias = nn.Parameter(bias)
class BasicModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 128, 3)
        # self.linear = nn.Linear(256, 2)
        self.linear = MyLinearLayer(256, 2)

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 256)
        return self.linear(x)
torch.manual_seed(0) # for repeatable results
basic_model = BasicModel()
inp = np.array([[[[1, 2, 3, 4],  # batch(=1) x channels(=1) x height x width
                  [1, 2, 3, 4],
                  [1, 2, 3, 4]]]])
x = torch.tensor(inp, dtype=torch.float)
print('Forward computation thru model:', basic_model(x))
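# Follow-up sketch (not in the original gist): because weights and bias
# were wrapped in nn.Parameter, the custom layer's tensors are registered
# with the module and will receive gradients during backprop.
for name, param in basic_model.named_parameters():
    print(name, tuple(param.shape))
# expect: conv.weight, conv.bias, linear.weights, linear.bias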