@andrewgiessel · Last active June 25, 2018
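Example output from a per-module backward hook on a 3D CNN. Each block below is one nn.Sequential stage printed by the hook during the backward pass; the stages appear in reverse order (classifier first, first conv block last) because gradients flow from the output back toward the input.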
Sequential(
  (0): Linear(in_features=10800, out_features=1000, bias=True)
  (1): ReLU()
  (2): Linear(in_features=1000, out_features=100, bias=True)
  (3): ReLU()
  (4): Linear(in_features=100, out_features=20, bias=True)
  (5): Softmax()
)
Inside Sequential backward
Inside class:Sequential
grad_input: <class 'tuple'> torch.cuda.FloatTensor
grad_output: <class 'tuple'> torch.cuda.FloatTensor
grad_input size: torch.Size([128, 20])
grad_output size: torch.Size([128, 20])
grad_input norm: tensor(1.00000e-10 *
1.3390, device='cuda:0')
Sequential(
  (0): Conv3d(200, 400, kernel_size=(3, 3, 3), stride=(1, 1, 1))
  (1): Dropout3d(p=0.3)
  (2): ReLU()
  (3): MaxPool3d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
Inside Sequential backward
Inside class:Sequential
grad_input: <class 'tuple'> torch.cuda.FloatTensor
grad_output: <class 'tuple'> torch.cuda.FloatTensor
grad_input size: torch.Size([128, 400, 6, 6, 6])
grad_output size: torch.Size([128, 400, 3, 3, 3])
grad_input norm: tensor(1.00000e-11 *
1.4033, device='cuda:0')
Sequential(
  (0): Conv3d(100, 200, kernel_size=(3, 3, 3), stride=(1, 1, 1))
  (1): Dropout3d(p=0.3)
  (2): ReLU()
  (3): MaxPool3d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
Inside Sequential backward
Inside class:Sequential
grad_input: <class 'tuple'> torch.cuda.FloatTensor
grad_output: <class 'tuple'> torch.cuda.FloatTensor
grad_input size: torch.Size([128, 200, 16, 16, 16])
grad_output size: torch.Size([128, 200, 8, 8, 8])
grad_input norm: tensor(1.00000e-12 *
8.4753, device='cuda:0')
Sequential(
  (0): Conv3d(4, 100, kernel_size=(3, 3, 3), stride=(1, 1, 1))
  (1): Dropout3d(p=0.3)
  (2): ReLU()
)
Inside Sequential backward
Inside class:Sequential
grad_input: <class 'tuple'> torch.cuda.FloatTensor
grad_output: <class 'tuple'> torch.cuda.FloatTensor
grad_input size: torch.Size([128, 100, 18, 18, 18])
grad_output size: torch.Size([128, 100, 18, 18, 18])
grad_input norm: tensor(1.00000e-12 *
3.1152, device='cuda:0')
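The hook that produced the output above, together with the model it was attached to: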
import torch
import torch.nn as nn

# Device used by the model below; the logs above show cuda:0.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def printgradnorm(self, grad_input, grad_output):
    # Module backward hook: called with tuples of gradients w.r.t. the
    # module's inputs and outputs once backward has run through the module.
    print(self)
    print('Inside ' + self.__class__.__name__ + ' backward')
    print('Inside class:' + self.__class__.__name__)
    print('')
    print('grad_input: ', type(grad_input), grad_input[0].type())
    print('grad_output: ', type(grad_output), grad_output[0].type())
    print('')
    print('grad_input size:', grad_input[0].size())
    print('grad_output size:', grad_output[0].size())
    print('grad_input norm:', grad_input[0].norm())
class CNN3D(nn.Module):
    def __init__(self):
        super(CNN3D, self).__init__()
        # Three 3D conv blocks: each conv (kernel 3, no padding) trims 2 from
        # every spatial dim, and each max pool halves it, so the printed
        # gradient shapes above imply a 20x20x20 input: 20 -> 18 -> 8 -> 3.
        self.layer1 = nn.Sequential(
            nn.Conv3d(4, 100, kernel_size=3),
            nn.Dropout3d(p=0.3),
            nn.ReLU()).to(device)
        self.layer2 = nn.Sequential(
            nn.Conv3d(100, 200, kernel_size=3),
            nn.Dropout3d(p=0.3),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=2, stride=2)).to(device)
        self.layer3 = nn.Sequential(
            nn.Conv3d(200, 400, kernel_size=3),
            nn.Dropout3d(p=0.3),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=2, stride=2)).to(device)
        self.fc = nn.Sequential(
            nn.Linear(3*3*3*400, 1000),
            nn.ReLU(),
            nn.Linear(1000, 100),
            nn.ReLU(),
            nn.Linear(100, 20),
            nn.Softmax(dim=1)).to(device)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.reshape(out.size(0), -1)  # flatten to (batch, 10800)
        out = self.fc(out)
        return out
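A minimal sketch of how the hook might be wired up and triggered. The input shape (128, 4, 20, 20, 20) is inferred from the gradient shapes printed above, and the sum() loss is a placeholder just to drive backward():

model = CNN3D()
for block in [model.layer1, model.layer2, model.layer3, model.fc]:
    block.register_backward_hook(printgradnorm)

x = torch.randn(128, 4, 20, 20, 20, device=device)
out = model(x)      # forward through all four blocks
loss = out.sum()    # placeholder scalar loss, just to trigger backward
loss.backward()     # the hook fires once per block, from fc back to layer1

Two things worth noting in the output. First, register_backward_hook on a container module like nn.Sequential was known to report gradients only for the container's last operation (visible above: layer2's grad_input has the pre-pool 16x16x16 shape, not the shape of the conv's input); later PyTorch versions deprecate it in favor of register_full_backward_hook. Second, the printed grad_input norms shrink from roughly 1e-10 at the classifier to 1e-12 at the first conv block, i.e. the gradient signal decays by about an order of magnitude per stage on its way back to the input.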