Volodymyr Kyrylov (proger)

"""
Randomized Binary Search Trees
https://www.cs.upc.edu/~conrado/research/papers/jacm-mr98.pdf
"""
import math
import random
from collections import Counter
class root:
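    # Hedged completion: the gist's class body is cut off in this preview, so
    # the fields and methods below are assumptions sketching the Martinez &
    # Roura algorithm from the paper linked above, not the gist's own code.
    def __init__(self, key, left=None, right=None):
        self.key = key
        self.left = left
        self.right = right
        self.size = 1 + (left.size if left else 0) + (right.size if right else 0)

    def split(self, key):
        "Split into trees holding keys smaller and larger than `key` (keys assumed distinct)."
        if key < self.key:
            if self.left is None:
                return None, self
            smaller, larger = self.left.split(key)
            return smaller, root(self.key, larger, self.right)
        else:
            if self.right is None:
                return self, None
            smaller, larger = self.right.split(key)
            return root(self.key, self.left, smaller), larger

    def insert(self, key):
        "Insert `key` at the root with probability 1/(size+1), otherwise recurse."
        if random.randrange(self.size + 1) == self.size:
            smaller, larger = self.split(key)
            return root(key, smaller, larger)
        if key < self.key:
            left = self.left.insert(key) if self.left else root(key)
            return root(self.key, left, self.right)
        else:
            right = self.right.insert(key) if self.right else root(key)
            return root(self.key, self.left, right)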
// uses https://github.com/HazyResearch/ThunderKittens
#include "tk/src/kittens.cuh"
#include "tk/src/common/pyutils/torch_helpers.cuh"
#define NUM_WORKERS 2 // This kernel uses this many workers in parallel per block, to help issue instructions more quickly.
#define DIMENSION 64 // This kernel operates over 64-dimensional vectors
#define DEBUG 0
using namespace kittens; // this kernel only handles headdim=q_reg.cols for simplicity. Also n should be a multiple of 256 here.
"a linear RNN that receives ones as input and gives increasingly better approximations to pi as output"
import numpy as np
import math
def binary(digits: int):
"Make a basis of powers of two of dimension `digits`, lowest bits first"
return 1 << np.arange(digits)
def leibniz(n):
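    # Hedged completion (the gist's function body is cut off in this preview):
    # the Leibniz series pi/4 = 1 - 1/3 + 1/5 - 1/7 + ... can be read as a
    # linear recurrence s_k = s_{k-1} + 4 * (-1)**k / (2*k + 1) driven by a
    # constant input of ones, matching the linear-RNN framing quoted above.
    "Approximate pi with the first `n` terms of the Leibniz series."
    return 4 * sum((-1) ** k / (2 * k + 1) for k in range(n))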
#%%
import math
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
plt.rcParams['axes.spines.left'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
@proger
proger / abv.py
Last active April 26, 2024 07:53
# prompt: https://twitter.com/francoisfleuret/status/1783479122418716805
import os
os.environ['TORCH_LOGS'] = 'output_code' # shows all the bmms
import torch
torch.set_float32_matmul_precision('high')
N, T, D, U, C = 3, 128, 5, 32, 32 # batch, time, heads, head_dim, dim
S = T
A = torch.randn(N, T, D, U) / U**0.5
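# Hedged illustration only: the linked prompt is not quoted in the gist, so
# the B and V tensors and the einsum pattern below are assumptions suggested
# by the shapes above, not the gist's actual computation.
B = torch.randn(N, S, D, U) / U**0.5             # key-like tensor, same layout as A
V = torch.randn(N, S, D, C)                      # value-like tensor
scores = torch.einsum('ntdu,nsdu->ndts', A, B)   # per-head similarities over time
weights = scores.softmax(dim=-1)
Y = torch.einsum('ndts,nsdc->ntdc', weights, V)  # (N, T, D, C)
print(Y.shape)                                   # torch.Size([3, 128, 5, 32])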
@proger
proger / xor.py
Last active April 22, 2024 13:36
tensor network that can learn xor
#%%
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
X = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]]).float()
y = torch.logical_xor(X[:, 0], X[:, 1]).float()
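# Hedged sketch continuing the snippet above (not the gist's model, which is
# not shown): XOR is exactly x1 + x2 - 2*x1*x2, so a linear term plus one
# trainable bilinear interaction tensor can fit the truth table in X and y.
class Bilinear(nn.Module):
    def __init__(self):
        super().__init__()
        self.w = nn.Parameter(torch.zeros(2))
        self.M = nn.Parameter(torch.zeros(2, 2))

    def forward(self, x):
        return x @ self.w + torch.einsum('bi,ij,bj->b', x, self.M, x)

model = Bilinear()
opt = torch.optim.Adam(model.parameters(), lr=1e-1)
for step in range(500):
    loss = F.mse_loss(model(X), y)
    opt.zero_grad()
    loss.backward()
    opt.step()
print(model(X).round())  # expected to approach tensor([0., 1., 1., 0.])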
"""
# download the model
huggingface-cli download google/gemma-2b-it
# run the server (set the model name here and in the prompt function below)
# notice --kv-cache-dtype fp8_e5m2
docker run --gpus all -p 8000:8000 -e HF_HOME=/hf -e CUDA_VISIBLE_DEVICES=1 -v ~/.cache/huggingface:/hf vllm/vllm-openai:latest --host 0.0.0.0 --model google/gemma-2b-it --kv-cache-dtype fp8_e5m2
# ask one question
echo 'what is python?' | python -m prompt | jq -r '.choices[].text'
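The question is piped through `python -m prompt`, but the prompt module itself is not shown; below is a hedged sketch of a minimal version that reads stdin and POSTs to the vLLM server's OpenAI-compatible /v1/completions endpoint started above (the model name and token limit are assumptions).
import json
import sys
import urllib.request

def main():
    # Build a completions request for the server started above.
    body = json.dumps({
        "model": "google/gemma-2b-it",   # assumption: same model as the server
        "prompt": sys.stdin.read().strip(),
        "max_tokens": 256,               # assumption: arbitrary limit
    }).encode()
    req = urllib.request.Request(
        "http://localhost:8000/v1/completions",
        data=body,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.read().decode())       # raw JSON, for jq to consume

if __name__ == "__main__":
    main()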
import torch
def binary(digits: int) -> torch.IntTensor:
"Make a basis of powers of two of dimension `digits`, lowest bits to the right"
return 1 << torch.arange(digits).flip(-1)
def sbt(x: torch.IntTensor) -> torch.IntTensor:
"""Slow Binary Transform.
@proger
proger / decode.py
Last active January 20, 2024 19:46
import argparse
from itertools import islice
import logging
from pathlib import Path
import evaluate
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.generation import BeamSearchDecoderOnlyOutput
from peft import PeftModel
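These imports are all the preview of decode.py shows; below is a hedged sketch of how they typically fit together (the model name, adapter path, dataset file, and generation settings are placeholders, not the gist's).
def decode(base_model: str, adapter_path: Path, num_beams: int = 4):
    "Load a base causal LM, attach a PEFT adapter, and beam-search over prompts."
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    model = AutoModelForCausalLM.from_pretrained(base_model)
    model = PeftModel.from_pretrained(model, str(adapter_path))
    model.eval()

    config = GenerationConfig(num_beams=num_beams, max_new_tokens=64)
    dataset = load_dataset("text", data_files="prompts.txt")["train"]

    for example in islice(dataset, 8):
        inputs = tokenizer(example["text"], return_tensors="pt")
        outputs = model.generate(**inputs, generation_config=config)
        print(tokenizer.decode(outputs[0], skip_special_tokens=True))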