Phil Wang (lucidrains)

var initial_money = 40;
var num_play_times = 30;
var default_bet = 3;
var total_simulations = 1000;
var survives = 0;
var final_money = [];

// even-money bet on an American roulette wheel: 18 of 38 pockets win
var flip = function() {
    return Math.random() < (18 / 38);
};
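The gist preview cuts off before the simulation loop, but the expected value of the strategy it encodes can be checked directly. A quick back-of-the-envelope in Python, with numbers mirroring the gist's parameters:

p_win = 18 / 38                            # even-money bet, American wheel
ev_per_spin = (p_win - (1 - p_win)) * 3    # default_bet = 3
print(ev_per_spin)                         # ~ -0.158: each spin loses about 16 cents on average
print(40 + 30 * ev_per_spin)               # ~ 35.26: expected bankroll after 30 spins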
import torch
from torch import nn
import torch.nn.functional as F

class Recommend(nn.Module):
    def __init__(self, num_items, num_users, dims):
        super().__init__()
        self.user_embed = nn.Embedding(num_users, dims)
        self.item_embed = nn.Embedding(num_items, dims)
        # assumed completion of the truncated gist: a small MLP scoring head
        self.net = nn.Sequential(
            nn.Linear(2 * dims, 4 * dims),
            nn.ReLU(),
            nn.Linear(4 * dims, 1)
        )

    def forward(self, users, items):
        # concatenate user and item embeddings, score the pair with the MLP
        x = torch.cat((self.user_embed(users), self.item_embed(items)), dim=-1)
        return self.net(x).squeeze(-1)
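A hypothetical smoke test for the module as completed above; the sizes and the forward signature are mine, not from the gist:

model = Recommend(num_items=1000, num_users=500, dims=32)
users = torch.randint(0, 500, (8,))
items = torch.randint(0, 1000, (8,))
scores = model(users, items)    # shape (8,): one unnormalized score per user-item pair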
# LSH attention as described in https://openreview.net/pdf?id=rkgNKkHtvB
# adapted from trax, stripped down to what the paper said was needed to make it work:
# namely, buckets of at least 64 with 8 rounds of hashing
# https://github.com/google/trax/blob/master/trax/layers/research/efficient_attention.py#L442
from torch import nn
import torch

def make_unit_length(x, epsilon=1e-6):
    # normalize vectors to the unit sphere before LSH bucketing
    norm = x.norm(p=2, dim=-1, keepdim=True)
    return x.div(norm + epsilon)
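The comments above name the two settings the paper found necessary (bucket count and number of hash rounds). For context, here is a minimal sketch of the random-rotation bucketing scheme the Reformer paper describes; hash_vectors and its argument names are my own stand-ins, not from the gist:

import torch

def hash_vectors(vecs, n_buckets=64, n_hashes=8):
    # project onto random directions, concatenate with the negated projections,
    # and take the argmax as the bucket id; repeat for n_hashes independent rounds
    batch, seqlen, dim = vecs.shape
    rotations = torch.randn(dim, n_hashes, n_buckets // 2)
    rotated = torch.einsum('bsd,dhr->bhsr', vecs, rotations)
    rotated = torch.cat((rotated, -rotated), dim=-1)
    return rotated.argmax(dim=-1)    # (batch, n_hashes, seqlen), bucket ids in [0, n_buckets)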
import torch
import torch.nn as nn
import torch.nn.functional as F

# helpers

def make_unit_length(x, epsilon=1e-6):
    norm = x.norm(p=2, dim=-1, keepdim=True)
    return x.div(norm + epsilon)
import torch
from torch import nn
from reformer_pytorch import ReformerLM

class ReformerClassifier(nn.Module):
    def __init__(self, num_classes):
        super().__init__()
        # assumed completion of the truncated gist from `dim = 1024` onward
        self.net = ReformerLM(
            num_tokens = 20000,
            dim = 1024,
            depth = 12,
            max_seq_len = 4096,
            return_embeddings = True    # return per-token embeddings instead of logits
        )
        self.to_logits = nn.Linear(1024, num_classes)

    def forward(self, x):
        # mean-pool token embeddings across the sequence, then classify
        return self.to_logits(self.net(x).mean(dim=1))
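Hypothetical usage of the classifier as completed above; the sequence length matches the max_seq_len I assumed:

model = ReformerClassifier(num_classes=10)
tokens = torch.randint(0, 20000, (2, 4096))
logits = model(tokens)    # shape (2, 10)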
import random
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models import resnet50
from kornia import augmentation as augs

class OutputHiddenLayer(nn.Module):
    def __init__(self, net, layer=-2):
        # assumed completion of the truncated gist: store the wrapped net
        # and the index of the hidden layer to expose as its output
        super().__init__()
        self.net = net
        self.layer = layer
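The gist preview stops at the constructor. One common way to realize this wrapper, and presumably roughly what the gist does, is a forward hook on the chosen child module. A self-contained sketch, with get_hidden_output as my own stand-in name:

import torch
from torch import nn
from torchvision.models import resnet50

def get_hidden_output(net, x, layer=-2):
    # run net(x) and capture the activation of the chosen child module via a forward hook
    captured = {}
    handle = list(net.children())[layer].register_forward_hook(
        lambda module, inp, out: captured.update(out=out)
    )
    net(x)
    handle.remove()
    return captured['out']

backbone = resnet50()
hidden = get_hidden_output(backbone, torch.randn(2, 3, 224, 224))    # (2, 2048, 1, 1)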
w = mapping_network(z)    # (batch, depth, dim)

nonlinear = nn.Sequential(
    nn.Linear(dim, dim, bias=False),
    nn.LeakyReLU(),    # fixed typo: nn.LeakyRelu does not exist
    nn.Linear(dim, init_channel_dim * 4)
)

init_image_block = nonlinear(w.mean(dim=1)).reshape(batch, 2, 2, init_channel_dim)
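The fragment above is not self-contained: mapping_network, z, batch, dim, and init_channel_dim are free names. A runnable version under assumed shapes, with a stand-in mapping network:

import torch
from torch import nn

batch, depth, dim, init_channel_dim = 4, 8, 512, 256

# stand-in mapping network; the real one maps noise z to latents w
mapping_network = nn.Sequential(nn.Linear(dim, dim), nn.LeakyReLU(), nn.Linear(dim, dim))

z = torch.randn(batch, depth, dim)
w = mapping_network(z)    # (batch, depth, dim)

nonlinear = nn.Sequential(
    nn.Linear(dim, dim, bias=False),
    nn.LeakyReLU(),
    nn.Linear(dim, init_channel_dim * 4)
)

# average w over depth, then map to a 2x2 spatial block of features
init_image_block = nonlinear(w.mean(dim=1)).reshape(batch, 2, 2, init_channel_dim)
print(init_image_block.shape)    # torch.Size([4, 2, 2, 256])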