This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Boolean mask selecting the entries of `target` that fall in the
# half-open interval [start, end).
mask_i = torch.logical_and(target >= start, target < end)
# Indices of the selected entries.
# NOTE(review): .squeeze() collapses to a 0-d tensor when exactly one
# element matches — confirm downstream code handles that case.
indices_i = torch.nonzero(mask_i).squeeze()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Time a GPU operation with CUDA events. Events are recorded asynchronously
# on the current stream, so the host must synchronize before any timing is
# read back.
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
z = x + y  # the measured op; assumes x and y are CUDA tensors — TODO confirm
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
# NOTE(review): the elapsed milliseconds would come from
# start.elapsed_time(end) — not shown in this snippet.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import sys


def main(arg):
    """Echo the received argument (here: the argv list minus the script name)."""
    print(arg)


if __name__ == '__main__':
    # Pass everything after the script name as a single list.
    main(sys.argv[1:])
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
$ tree foo | |
foo | |
├── string.py | |
└── test.py | |
0 directories, 2 files | |
$ cat foo/string.py | |
ascii_lowercase='this is a personal module'
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Convert integer class labels to one-hot vectors with scatter_.
print(labels.detach().numpy())
labels.unsqueeze_(-1)  # (N,) -> (N, 1): scatter_ needs an index tensor with a dim-1 axis
print(labels.detach().numpy())
# torch.zeros replaces the original FloatTensor(...) + zero_() pair, which
# first allocated *uninitialized* memory and then zeroed it in a second step.
# Result is identical: a float32 (batch_size, 10) tensor of zeros.
labels_onehot = torch.zeros(batch_size, 10)
# Write 1 at column `labels[i]` of row i.
# NOTE(review): scatter_ requires `labels` to be an integer (int64) tensor — confirm.
labels_onehot.scatter_(1, labels, 1)
print(labels_onehot.detach().numpy())
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Truncated backpropagation
def detach(states):
    """Return a list of tensors detached from the autograd graph.

    Each returned tensor shares storage with its input but carries no
    gradient history, which truncates backprop through time at this point.
    """
    return list(map(lambda state: state.detach(), states))
# Train the model | |
for epoch in range(num_epochs): | |
# Set initial hidden and cell states | |
states = (torch.zeros(num_layers, batch_size, hidden_size).to(device), | |
torch.zeros(num_layers, batch_size, hidden_size).to(device)) | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Stream the file line by line instead of slurping it into memory.
with open(path, 'r') as f:
    for line in f:
        # no need to lines = f.read().split('\n')[:-1]
        # NOTE(review): loop body is omitted in this snippet; each `line`
        # still carries its trailing newline (use line.rstrip('\n') if needed).
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Test the model
model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():  # no gradient tracking needed for inference
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Take the argmax over dim 1 (the class dimension); torch.max returns
        # (values, indices) and only the indices are kept.
        # NOTE(review): `.data` is legacy — `outputs` alone suffices under no_grad.
        _, predicted = torch.max(outputs.data, 1)
        # NOTE(review): the accumulation of `correct`/`total` continues beyond
        # this excerpt.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Format-spec demo:
#   {0:<10d}   - epoch, left-aligned in a 10-char field
#   {1:>10d}   - num_epochs, right-aligned in a 10-char field
#   {2:=^20.4f} - loss with 4 decimals, centered in 20 chars, padded with '='
print ('Epoch [{0:<10d}/{1:>10d}], Loss: {2:=^20.4f}'.format(epoch+1, num_epochs, loss.item()))
# Epoch [10        /        60], Loss: =======2.5578=======
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# NumPy -> PyTorch conversion: constructor vs. from_numpy dtype behaviour.
a = np.arange(10)

# torch.Tensor is an alias of torch.FloatTensor: it always produces float32,
# copying (and casting) the data.
ft = torch.Tensor(a)  # same as torch.FloatTensor

# torch.from_numpy preserves the NumPy dtype (int64 here) and shares the
# underlying buffer with `a`.
it = torch.from_numpy(a)

a.dtype   # == dtype('int64')
ft.dtype  # == torch.float32
it.dtype  # == torch.int64
NewerOlder