Skip to content

Instantly share code, notes, and snippets.

View Saibo-creator's full-sized avatar

Saibo-creator

View GitHub Profile
@Saibo-creator
Saibo-creator / awesome_productivity_tools.md
Created March 3, 2025 09:21
A few software tools that really enhanced my life as a programmer
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers_cfg.grammar_utils import IncrementalGrammarConstraint
from transformers_cfg.generation.logits_process import GrammarConstrainedLogitsProcessor
# Run on the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_id = "mistralai/Mistral-7B-Instruct-v0.3"  # Hugging Face hub id of the checkpoint
# Download/load the tokenizer that matches the model checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_id)
@Saibo-creator
Saibo-creator / test_kv_cache_manual.py
Created January 31, 2025 14:50
Surprisingly, on an A100, the kv-cache slows down generation consistently regardless of sequence length from 64 to 512; on an RTX 2080 Ti, the kv-cache helps a bit
import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load Phi-3.5 model and tokenizer
MODEL_NAME = "microsoft/Phi-3.5-mini-instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# fp16 weights; device_map="auto" lets the accelerate backend place layers on available devices.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
# Define a simple prompt
# Version 0.13.2
from transformers import AutoModelForCausalLM
from peft import LoraModel, LoraConfig
config = LoraConfig(
task_type="CAUSAL_LM",
r=8,
lora_alpha=32,
lora_dropout=0.01,
import torch

# Make bfloat16 the default floating-point dtype for all newly created tensors.
torch.set_default_dtype(torch.bfloat16)

# Pick the best available accelerator: CUDA first, then Apple-Silicon MPS.
# On a plain-CPU machine neither branch fires and the default device is untouched.
if torch.cuda.is_available():
    _accelerator = "cuda:0"
elif torch.backends.mps.is_available():
    _accelerator = "mps"
else:
    _accelerator = None

if _accelerator is not None:
    torch.set_default_device(_accelerator)
@Saibo-creator
Saibo-creator / setup_new_vm.sh
Last active March 21, 2025 09:34
zsh, conda, tmux
#!/bin/bash
# Install zsh via apt unless it is already present on the system.
if command -v zsh &> /dev/null; then
    echo "Zsh is already installed."
else
    echo "Zsh not found. Installing..."
    sudo apt update
    sudo apt install -y zsh
fi
##########
#
# The original robbyrussell theme has a minimal prompt which doesn't contain username and hostname
# This derived version adds the user name and hostname, that's all
#
##########
# Define the return status indicator:
# green arrow when the last command exited 0, red arrow otherwise.
# NOTE(review): `local` assumes this line is sourced inside a function/theme context — confirm.
local ret_status="%(?:%{$fg_bold[green]%}➜ :%{$fg_bold[red]%}➜ %s)"
import torch
from transformers import GPT2Tokenizer, AutoModelForCausalLM
import numpy as np
import time
from transformers import (
AutoTokenizer,
AutoModelForSeq2SeqLM,
LogitsProcessorList,
# This grammar matches the Emoticons Unicode block (U+1F600–U+1F64F), which represents 80 emoticons (emoji)
# that are commonly used in text messaging and social media. c.f. https://en.wikipedia.org/wiki/Emoticons_(Unicode_block)
# It doesn't include the Miscellaneous Symbols and Pictographs block (U+1F300–U+1F5FF) and
# the Supplemental Symbols and Pictographs block (U+1F900–U+1F9FF).
# A valid string is one or more characters drawn from that code-point range.
root ::= emoji+
emoji ::= [😀-🙏]
import random
from collections import defaultdict
from itertools import product
from typing import Callable, Optional
import torch
from torch_geometric.data import Data, HeteroData, InMemoryDataset
from torch_geometric.utils import coalesce, remove_self_loops, to_undirected