NutShell: Softmax, LogSoftmax, NLLLoss and CrossEntropyLoss in PyTorch
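# In a nutshell: for a row of logits z and target class y,
#   CrossEntropyLoss(z, y) == NLLLoss(log_softmax(z), y)
#                          == -log(exp(z[y]) / sum_j exp(z[j]))
# The sections below verify this identity step by step.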
import torch
import torch.nn as nn
import torch.nn.functional as F
# * [1] Inputs and labels: 4 samples, 3 classes
inputs = torch.randn(4, 3)                # raw logits
targets = torch.LongTensor([0, 2, 1, 2])  # class index per sample
print('[---Inputs and Targets:---]\n', inputs)
print(targets, '\n')
# * [2] Softmax
sfm1 = nn.Softmax(dim=1)          # module API: instantiate, then call
sfm2 = F.softmax(inputs, dim=1)   # functional API: pass the tensor directly
print('[---Softmax:---]\n', sfm1(inputs))
print(sfm2, '\n')
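# Sanity check: each softmax row is a probability distribution, so it
# should sum to 1 (up to floating-point error):
print(sfm1(inputs).sum(dim=1))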
# * [3] log_softmax
log1 = torch.log(sfm1(inputs))    # log of softmax, composed by hand
log2 = nn.LogSoftmax(dim=1)       # fused log-softmax module
print('[---Log-Softmax:---]\n', log1)
print(log2(inputs), '\n')
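# F.log_softmax fuses the two steps and is more numerically stable than
# composing torch.log with softmax; the two results should match:
print(torch.allclose(log1, F.log_softmax(inputs, dim=1)))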
# * [4] NLLLoss vs CrossEntropyLoss
loss1 = nn.NLLLoss()              # expects log-probabilities
print('[---NLLLoss:---]\n', loss1(log2(inputs), targets))
loss2 = nn.CrossEntropyLoss()     # expects raw logits; applies log-softmax internally
print(loss2(inputs, targets))
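# A minimal manual sketch of what NLLLoss computes: pick the
# log-probability at each target index, negate, and average:
manual_nll = -log2(inputs)[torch.arange(len(targets)), targets].mean()
print(manual_nll)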
# * [5] Check: end-to-end equivalence on fresh random logits
logits = torch.randn(4, 3)
targets = torch.LongTensor([0, 2, 1, 2])
print(nn.NLLLoss()(torch.log(nn.Softmax(dim=1)(logits)), targets))
print(nn.CrossEntropyLoss()(logits, targets))
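# The two losses should agree to floating-point precision:
print(torch.allclose(
    nn.NLLLoss()(torch.log(nn.Softmax(dim=1)(logits)), targets),
    nn.CrossEntropyLoss()(logits, targets)))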