import numpy as np
import torch

def AttentionMask(encoder_len, state_len, decoder_len, offset=0, near_decay=0, far_decay=0, device='cpu'):
    # Start from a constant -offset bias over the causally visible region.
    m = -offset*np.tri(decoder_len, encoder_len+decoder_len+state_len, encoder_len)
    # Sum shifted lower-triangular masks so each visible key position
    # accumulates a bias proportional to its distance behind the query.
    for i in range(encoder_len+decoder_len-1):
        m += np.tri(decoder_len, encoder_len+decoder_len+state_len, encoder_len-i-1)
    if state_len:
        # State rows attend with no distance bias.
        ms = np.zeros((state_len, encoder_len+decoder_len+state_len))
        m = np.concatenate([m, ms], axis=0)
    m = torch.tensor(m, dtype=torch.float32, device=device)
    # Hard mask: 1 marks future (disallowed) positions, 0 marks visible ones.
    mx = 1-np.tri(decoder_len, encoder_len+decoder_len, encoder_len)
    mx = np.concatenate([mx, np.zeros((decoder_len, state_len))], axis=1)
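For intuition (an aside, not part of the gist): the loop above sums shifted lower-triangular np.tri masks, so every key position a query can see picks up a bias equal to how many steps back it lies. A tiny standalone run with decoder_len=3, encoder_len=2, state_len=0:

import numpy as np
decoder_len, encoder_len, state_len = 3, 2, 0
total = encoder_len + decoder_len + state_len
m = np.zeros((decoder_len, total))
for i in range(encoder_len + decoder_len - 1):
    m += np.tri(decoder_len, total, encoder_len - i - 1)
print(m)
# [[2. 1. 0. 0. 0.]
#  [3. 2. 1. 0. 0.]
#  [4. 3. 2. 1. 0.]]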
import torch
import torch.nn as nn

class FIR(nn.Module):
    def __init__(self, in_dim, out_dim=None, hidden_dim=None, segment_sizes=[1,2,4,8], activation=nn.functional.gelu, device='cpu'):
        super().__init__()
        # Default the output and hidden widths to the input width.
        if not out_dim: out_dim = in_dim
        if not hidden_dim: hidden_dim = in_dim
        cursor = 1
        nodes = [cursor]
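A guess at the intent behind the cursor/nodes bookkeeping (purely illustrative, not from the gist preview): accumulate segment_sizes into running boundaries, so the default [1, 2, 4, 8] covers 15 past positions with exponentially coarser segments.

segment_sizes = [1, 2, 4, 8]
cursor = 1
nodes = [cursor]
for size in segment_sizes:
    cursor += size
    nodes.append(cursor)
print(nodes)  # [1, 2, 4, 8, 16] -> hypothetical segment boundaries in timesteps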
import torch
import torch.nn as nn

# Linear Segment Additive Memory
class LSAM(nn.Module):
    def __init__(self, in_dim, out_dim=None, hidden_dim=None, segment_sizes=[1,2,4,8], activation=nn.functional.gelu, device='cpu'):
        super().__init__()
        if not out_dim: out_dim = in_dim
        if not hidden_dim: hidden_dim = in_dim + out_dim
        cursor = 1
#!/usr/bin/python3
# export PYTHONIOENCODING=UTF-8
gpt2_size = "gpt2"
import numpy as np
from transformers import TFGPT2LMHeadModel, GPT2Tokenizer, GPT2Config
tokenizer = GPT2Tokenizer.from_pretrained(gpt2_size)
gpt2 = TFGPT2LMHeadModel.from_pretrained(gpt2_size)
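As an aside (not in the gist): once the tokenizer and TF model are loaded, text is typically produced with the standard transformers generate API, roughly like this:

input_ids = tokenizer.encode("The meaning of life is", return_tensors="tf")
output_ids = gpt2.generate(input_ids, max_length=32, do_sample=True, top_k=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))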
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
/* Returns a float in [-1, 1] (triangular distribution centered on 0). */
float RandomFloat() {
	int r = rand() - rand();
	return (float)r / (float)RAND_MAX;
}
#!/usr/bin/python3
#export PYTHONIOENCODING=UTF-8
print("Loading. This might take a while...")
import os
import sys
from transformers import AutoModelWithLMHead, AutoTokenizer
from datetime import date
#model_type = 'gpt2' # Much lower resource use! Also lower intelligence.
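The preview cuts off before the model is actually loaded; a minimal sketch of how it presumably continues, using 'gpt2' purely as a placeholder model_type:

model_type = 'gpt2'  # placeholder value for this sketch only
tokenizer = AutoTokenizer.from_pretrained(model_type)
model = AutoModelWithLMHead.from_pretrained(model_type)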
#include <stdio.h>
#include <SDL.h>
#include <GL/glew.h>
#include <math.h>
#define BTN_QUIT 256
#define BTN_MOUSE0 257
#define BTN_MOUSE1 258
#define BTN_MOUSE2 259
#define BTN_MOUSE3 260
umbra-scientia / mob.js
Last active April 7, 2020 16:50
symbolic differential calculus
var mob = {
	num: function(v) { return {op: 'num', value: v, args: []}; },
	sym: function(s) { return {op: 'sym', symbol: s, args: []}; },
	add: function(x, y) { return {op: 'add', args: [x, y]}; },
	mul: function(x, y) { return {op: 'mul', args: [x, y]}; },
	sum: function(v) { return {op: 'add', args: v}; },
	prod: function(v) { return {op: 'mul', args: v}; },
	neg: function(x) { return {op: 'neg', args: [x]}; },
	recip: function(x) { return {op: 'recip', args: [x]}; },
	sub: function(x, y) { return mob.add(x, mob.neg(y)); },
umbra-scientia / loader.asm
Created July 25, 2019 22:40
chroot jail loader for linux/amd64
[BITS 64]
[ORG 0x100000]
ElfHeader:
db 0x7F, "ELF" ; e_ident / EI_MAG*
db 2 ; e_ident / EI_CLASS
db 1 ; e_ident / EI_DATA
db 1 ; e_ident / EI_VERSION
db 3 ; e_ident / EI_OSABI
db 0 ; e_ident / EI_ABIVERSION