
@Redchards
Last active November 8, 2018 22:03
PyTorch implementation of a 1D convolutional NN
# -*- coding: utf-8 -*-
"""
Spyder editor

This is a temporary script.
"""
import logging
import torchtext.datasets as datasets
import torchtext.data as data
import torchtext
import torch.nn as nn
import torch
class Conv1D(nn.Module):
    # Hand-rolled 1D convolution: one shared linear layer applied to
    # sliding windows over the time dimension.
    def __init__(self, in_channels, out_channels, stride, kernel_width):
        super(Conv1D, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.kernel_width = kernel_width
        self.kernel = nn.Linear(kernel_width * in_channels, out_channels)

    def forward(self, x):
        # x: (batch, in_channels, length). The upper bound x.shape[2] + 1
        # keeps the final window, which ends at the last time step.
        l = [self.kernel(x[:, :, i - self.kernel_width:i].reshape(x.shape[0], self.in_channels * self.kernel_width))
             for i in range(self.kernel_width, x.shape[2] + 1, self.stride)]
        # Output is (batch, L_out, out_channels): the transpose of nn.Conv1d's layout.
        return torch.stack(l, dim=1)
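
# Illustrative sanity check (a sketch, not used by the training below): with
# arbitrary example shapes, the custom Conv1D should yield the same number of
# output positions as nn.Conv1d, just with channels last.
_conv = Conv1D(in_channels=4, out_channels=8, stride=1, kernel_width=3)
_x = torch.randn(2, 4, 10)
print(_conv(_x).shape)               # torch.Size([2, 8, 8]) = (batch, L_out, out_channels)
print(nn.Conv1d(4, 8, 3)(_x).shape)  # torch.Size([2, 8, 8]) = (batch, out_channels, L_out)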
logging.basicConfig()
# logger = logging.getLogger("model")
# logger.setLevel(level=logging.DEBUG)
# Define the text and label fields
TEXT = data.Field(lower=True, include_lengths=False, batch_first=True)
LABEL = data.Field(sequential=False, is_target=True)
# make splits for data
train, val, test = datasets.sst.SST.splits(TEXT, LABEL)
# Load pretrained GloVe word vectors (6B-token corpus, 100-dimensional)
wordemb = torchtext.vocab.GloVe("6B", dim=100)
# Build the vocabularies
# For labels, we set specials_first to False so <unk> comes last
# (making it easy to discard)
TEXT.build_vocab(train, vectors=wordemb)
LABEL.build_vocab(train, specials_first=False)
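# With specials_first=False the label vocabulary should end with <unk>,
# e.g. something like ['positive', 'negative', 'neutral', '<unk>'], so the
# three real SST classes occupy indices 0-2.
print(LABEL.vocab.itos)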
train_iter, val_iter, test_iter = data.BucketIterator.splits(
    (train, val, test), batch_size=10, device=-1)  # device=-1: CPU; the model below is never moved to a GPU
# Embedding layer initialized from the GloVe vectors (frozen by default)
nn_embeddings = nn.Embedding.from_pretrained(TEXT.vocab.vectors)
# 100-d embeddings in, one output channel per SST class. Note that nn.Conv1d
# takes kernel_size before stride, the reverse of the custom Conv1D above.
model = torch.nn.Conv1d(100, 3, kernel_size=3, stride=1)
loss = torch.nn.CrossEntropyLoss()
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(100):
    for i, batch in enumerate(train_iter):
        # print(batch.text, batch.label)
        # print(nn_embeddings(batch.text).shape)
        # print("Batch label num {}".format(batch.label.shape))
        optimizer.zero_grad()
        # (batch, length, 100) -> (batch, 100, length), as nn.Conv1d expects
        d = nn_embeddings(batch.text).transpose(1, 2)
        # Max-pool over the time dimension (dim 2): one score per class, shape (batch, 3)
        forward_pass = model(d).max(2)[0]
        # print(forward_pass)
        # print(batch.label)
        err = loss(forward_pass, batch.label)
        err.backward()
        optimizer.step()
        # Batch accuracy: fraction of argmax predictions matching the labels
        acc = (forward_pass.argmax(1) == batch.label).float().mean().item()
        print("Epoch {} batch {} : Loss {:.4f}".format(epoch, i, err.item()))
        print("Accuracy {}".format(acc))