@goddoe
Created September 15, 2019 10:36
SelfAttention
import torch
import torch.nn as nn


class SelfAttention(nn.Module):
    """Single-head scaled dot-product self-attention."""

    def __init__(self, input_dim, output_dim, dropout=0.1):
        super(SelfAttention, self).__init__()
        # Linear projections for queries, keys, and values.
        self.q = nn.Linear(input_dim, output_dim)
        self.k = nn.Linear(input_dim, output_dim)
        self.v = nn.Linear(input_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: (batch, seq_len, input_dim)
        q = self.dropout(self.q(x))
        k = self.dropout(self.k(x))
        v = self.dropout(self.v(x))
        # Attention weights over sequence positions, scaled by sqrt(d_k).
        scores = q @ k.transpose(-2, -1) / k.size(-1) ** 0.5
        alpha = torch.softmax(scores, dim=-1)
        # Weighted sum of values: (batch, seq_len, output_dim)
        return torch.matmul(alpha, v)
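
For reference, a minimal usage sketch of the module above; the batch size, sequence length, and dimensions are illustrative assumptions, not values from the gist:

# Usage sketch (shapes are arbitrary assumptions).
attn = SelfAttention(input_dim=64, output_dim=64, dropout=0.1)
x = torch.randn(2, 10, 64)   # (batch=2, seq_len=10, input_dim=64)
out = attn(x)
print(out.shape)             # torch.Size([2, 10, 64])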