@FavioVazquez
Created February 26, 2019 18:33
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
__weights_dict = dict()
def load_weights(weight_file):
    """Load a pickled dict of per-layer weights saved with np.save."""
    if weight_file is None:
        return
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except UnicodeDecodeError:
        # Weight files pickled under Python 2 need bytes decoding.
        weights_dict = np.load(weight_file, allow_pickle=True, encoding='bytes').item()
    return weights_dict
class KitModel(nn.Module):

    def __init__(self, weight_file):
        super(KitModel, self).__init__()
        global __weights_dict
        __weights_dict = load_weights(weight_file)

        # Channel counts assume a 3-channel 32x32 input (CIFAR-10-style):
        # two 32-filter 3x3 convolutions, a 2x2 max pool, then two 64-filter
        # convolutions and a second pool, which yields the 64*6*6 = 2304
        # features expected by dense_1.
        self.convolution2d_1 = self.__conv(2, name='convolution2d_1', in_channels=3, out_channels=32, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.convolution2d_2 = self.__conv(2, name='convolution2d_2', in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.convolution2d_3 = self.__conv(2, name='convolution2d_3', in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.convolution2d_4 = self.__conv(2, name='convolution2d_4', in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.dense_1 = self.__dense(name='dense_1', in_features=2304, out_features=512, bias=True)
        self.dense_2 = self.__dense(name='dense_2', in_features=512, out_features=10, bias=True)
    def forward(self, x):
        # Block 1: 'same'-padded conv, valid conv, 2x2 max pool, dropout.
        convolution2d_1_pad = F.pad(x, (1, 1, 1, 1))
        convolution2d_1 = self.convolution2d_1(convolution2d_1_pad)
        activation_1 = F.relu(convolution2d_1)
        convolution2d_2 = self.convolution2d_2(activation_1)
        activation_2 = F.relu(convolution2d_2)
        maxpooling2d_1 = F.max_pool2d(activation_2, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
        dropout_1 = F.dropout(input=maxpooling2d_1, p=0.25, training=self.training, inplace=True)
        # Block 2: the same pattern with 64 filters.
        convolution2d_3_pad = F.pad(dropout_1, (1, 1, 1, 1))
        convolution2d_3 = self.convolution2d_3(convolution2d_3_pad)
        activation_3 = F.relu(convolution2d_3)
        convolution2d_4 = self.convolution2d_4(activation_3)
        activation_4 = F.relu(convolution2d_4)
        maxpooling2d_2 = F.max_pool2d(activation_4, kernel_size=(2, 2), stride=(2, 2), padding=0, ceil_mode=False)
        dropout_2 = F.dropout(input=maxpooling2d_2, p=0.25, training=self.training, inplace=True)
        # Classifier head: flatten, 512-unit dense layer, dropout, 10-way softmax.
        flatten_1 = dropout_2.view(dropout_2.size(0), -1)
        dense_1 = self.dense_1(flatten_1)
        activation_5 = F.relu(dense_1)
        dropout_3 = F.dropout(input=activation_5, p=0.5, training=self.training, inplace=True)
        dense_2 = self.dense_2(dropout_3)
        activation_6 = F.softmax(dense_2, dim=1)
        return activation_6
    @staticmethod
    def __conv(dim, name, **kwargs):
        if dim == 1:
            layer = nn.Conv1d(**kwargs)
        elif dim == 2:
            layer = nn.Conv2d(**kwargs)
        elif dim == 3:
            layer = nn.Conv3d(**kwargs)
        else:
            raise NotImplementedError()

        # Copy the stored weights (already in PyTorch layout) into the layer parameters.
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer

    @staticmethod
    def __dense(name, **kwargs):
        layer = nn.Linear(**kwargs)
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer
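

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original gist): load_weights()
# expects a .npy file holding a pickled dict of the form
# {layer_name: {'weights': ndarray, 'bias': ndarray}} with arrays already in
# PyTorch layout. The zero-filled arrays and the file name
# 'dummy_cifar10_weights.npy' below are hypothetical stand-ins for the real
# converted weight file.
if __name__ == "__main__":
    shapes = {
        'convolution2d_1': ((32, 3, 3, 3), (32,)),
        'convolution2d_2': ((32, 32, 3, 3), (32,)),
        'convolution2d_3': ((64, 32, 3, 3), (64,)),
        'convolution2d_4': ((64, 64, 3, 3), (64,)),
        'dense_1': ((512, 2304), (512,)),
        'dense_2': ((10, 512), (10,)),
    }
    dummy = {name: {'weights': np.zeros(w, dtype=np.float32),
                    'bias': np.zeros(b, dtype=np.float32)}
             for name, (w, b) in shapes.items()}
    np.save('dummy_cifar10_weights.npy', dummy)

    model = KitModel('dummy_cifar10_weights.npy')
    model.eval()  # disables the dropout layers for inference
    with torch.no_grad():
        probs = model(torch.randn(1, 3, 32, 32))  # one 32x32 RGB image
    print(probs.shape)  # torch.Size([1, 10]); each row sums to 1 after softmax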