
imdb_evaluate_epoch_end_pytorch.py
epoch_history["loss"].append(sum(batch_history["loss"]) / len(batch_history["loss"]))
epoch_history["accuracy"].append(sum(batch_history["accuracy"]) / len(batch_history["accuracy"]))
model.eval()
print("Validation...")
val_loss, val_accuracy = validate(model, valid_loader)
epoch_history["val_loss"].append(val_loss)
epoch_history["val_accuracy"].append(val_accuracy)
print(f"{epoch_history=}")
imdb_pytorch_training.py
from tqdm import tqdm, trange

for epoch in trange(epochs, unit="epoch", desc="Train"):
    model.train()
    with tqdm(train_loader, desc="Train") as tbatch:
        for samples, targets in tbatch:
            samples = samples.to(device).long()
            targets = targets.to(device)
            model.zero_grad()
            predictions, _ = model(samples.transpose(0, 1))
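The preview cuts off before the backward pass. A minimal sketch of the rest of the inner loop, assuming the criterion, optimizer and batch_history from initialize_training_imdb_pytorch.py and the same 0.5 threshold for accuracy:

            # (continues inside the per-batch loop above)
            loss = criterion(predictions.squeeze(), targets.float())
            loss.backward()
            optimizer.step()

            accuracy = ((predictions.squeeze() > 0.5).float() == targets.float()).float().mean()
            batch_history["loss"].append(loss.item())
            batch_history["accuracy"].append(accuracy.item())
            tbatch.set_postfix(loss=loss.item(), accuracy=accuracy.item())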
initialize_training_imdb_pytorch.py
# Assumes torch, nn and the BiLSTM class from the model snippets are already in scope,
# along with the input_dim / embedding_dim / hidden_dim hyperparameters.
model = BiLSTM(input_dim, embedding_dim, hidden_dim)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
criterion = nn.BCELoss().to(device)
optimizer = torch.optim.Adam(model.parameters())
model.to(device)

batch_history = {
    "loss": [],
    "accuracy": []
}
epoch_history = {
    # keys match what imdb_evaluate_epoch_end_pytorch.py appends after each epoch
    "loss": [],
    "accuracy": [],
    "val_loss": [],
    "val_accuracy": []
}
bilstm_keras_to_pytorch_updated.py
import torch
from torch import nn

class BiLSTM(nn.Module):
    def __init__(self, input_dim, embedding_dim, hidden_dim):
        super().__init__()
        self.input_dim = input_dim
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.encoder = nn.Embedding(input_dim, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim,
                            num_layers=2, bidirectional=True)  # assumed: two bidirectional layers, as in the Keras Sequential model
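The forward pass is not visible in the preview. A minimal sketch, assuming (as the training loop implies) that the model takes a (seq_len, batch) tensor of token ids and returns a sigmoid prediction together with the LSTM state, and assuming a hypothetical output layer self.decoder = nn.Linear(2 * hidden_dim, 1) defined later in __init__:

    def forward(self, inputs):
        # inputs: (seq_len, batch) token ids, as passed by the training loop
        embedded = self.encoder(inputs)                      # (seq_len, batch, embedding_dim)
        output, (hidden, cell) = self.lstm(embedded)         # output: (seq_len, batch, 2 * hidden_dim)
        # concatenate the final forward and backward hidden states of the top layer
        final = torch.cat((hidden[-2], hidden[-1]), dim=1)   # (batch, 2 * hidden_dim)
        prediction = torch.sigmoid(self.decoder(final))      # (batch, 1), suitable for nn.BCELoss
        return prediction, (hidden, cell)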
keras_to_pytorch_init.py
# Added as a method of the BiLSTM class
def init_weights(self):
    self.encoder.weight.data.uniform_(-0.5, 0.5)
    ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
    hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
    b = (param.data for name, param in self.named_parameters() if 'bias' in name)
    for t in ih:
        nn.init.xavier_uniform_(t)  # Keras' glorot_uniform for input-hidden weights
    for t in hh:
        nn.init.orthogonal_(t)      # Keras' orthogonal for recurrent weights
    for t in b:
        nn.init.constant_(t, 0)     # assumed: zeros, the Keras bias default
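Presumably init_weights is called once after constructing the model, before training starts, for example:

model = BiLSTM(input_dim, embedding_dim, hidden_dim)
model.init_weights()  # apply the Keras-style initialization instead of the PyTorch defaults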
bilstm_keras_to_pytorch_naive.py
import torch
from torch import nn

class BiLSTM(nn.Module):
    def __init__(self, input_dim, embedding_dim, hidden_dim):
        super().__init__()
        self.input_dim = input_dim
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.encoder = nn.Embedding(input_dim, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim,
                            num_layers=2, bidirectional=True)  # assumed: two bidirectional layers, as in the Keras Sequential model
imdb_bilstm_classifier.py
from tensorflow.keras.layers import Bidirectional, LSTM, Dense, Embedding
from tensorflow.keras.models import Sequential

model = Sequential([
    Embedding(input_dim, embedding_dim),
    Bidirectional(LSTM(hidden_dim, return_sequences=True)),
    Bidirectional(LSTM(hidden_dim)),
    Dense(1, activation="sigmoid")
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=["accuracy"])
model.summary()
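For comparison with the PyTorch loop, the Keras side trains with a single fit call. A minimal sketch, assuming the padded x_train / x_val arrays from the preprocessing snippets and the same batch_size and epochs values used in the PyTorch code:

model.fit(
    x_train, y_train,
    validation_data=(x_val, y_val),
    batch_size=batch_size,
    epochs=epochs
)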
imdb_adapt_pytorch.py
from torch.utils.data import TensorDataset, DataLoader
train_data = TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
valid_data = TensorDataset(torch.from_numpy(x_val), torch.from_numpy(y_val))
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size, drop_last=True)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size, drop_last=True)
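A quick way to sanity-check the loaders is to pull one batch and look at the shapes; with the maxlen=200 padding from the Keras preprocessing snippet, each batch should come out as (batch_size, 200):

samples, targets = next(iter(train_loader))
print(samples.shape)   # torch.Size([batch_size, 200])
print(targets.shape)   # torch.Size([batch_size])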
print_imdb_sample.py
from tensorflow.keras.datasets import imdb

index_offset = 3
word_index = imdb.get_word_index(path="imdb_word_index.json")
word_index = {k: (v + index_offset) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2
word_index["<UNUSED>"] = 3
index_to_word = {v: k for k, v in word_index.items()}

def recover_text(sample, index_to_word):
    # assumed body: map each id back to its token, falling back to <UNK> for unknown ids
    return " ".join(index_to_word.get(i, "<UNK>") for i in sample)
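Assuming x_train holds the integer-encoded reviews straight from imdb.load_data (before padding), recovering one review is then just:

print(recover_text(x_train[0], index_to_word))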
keras_imdb_padding.py
from tensorflow.keras.preprocessing.sequence import pad_sequences
maxlen = 200
x_train = pad_sequences(x_train, maxlen=maxlen)
x_val = pad_sequences(x_val, maxlen=maxlen)
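The padding snippet assumes x_train / x_val already hold the integer-encoded reviews. A minimal sketch of that loading step, assuming keras.datasets.imdb with the vocabulary capped at input_dim words (the index_offset convention in print_imdb_sample.py matches the loader's default start/OOV handling):

from tensorflow.keras.datasets import imdb

# Load reviews as lists of word ids; the test split doubles as the validation set here.
(x_train, y_train), (x_val, y_val) = imdb.load_data(num_words=input_dim)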