Skip to content

Instantly share code, notes, and snippets.

View comaniac's full-sized avatar

Cody Yu comaniac

View GitHub Profile
@comaniac
comaniac / hf_hub_download_trend_db.json
Last active December 18, 2023 00:11
HF Download Trend DB
This file has been truncated, but you can view the full file.
{
"09-14-22": [
{
"model_id": "bert-base-uncased",
"download": 31330425
},
{
"model_id": "Jean-Baptiste/camembert-ner",
"download": 20521361
},
@comaniac
comaniac / hf_hub_model_size_db.json
Last active January 11, 2023 21:49
HF Model Size DB
{
"roberta-base": {
"model_id": "roberta-base",
"size": 0.124645632,
"code": 0,
"memo": null
},
"tals/albert-xlarge-vitaminc-mnli": {
"model_id": "tals/albert-xlarge-vitaminc-mnli",
"size": 0.058724864,

HuggingFace Model Size

This table documents the top-5000 most-downloaded HuggingFace models (during 8/8/2022–9/8/2022), sorted by their sizes. Note that models that are not HuggingFace Transformers compatible are not listed here.

Model #Parameters
bigscience/bloom 176.2B
bigscience/bloom-petals 176.2B
facebook/opt-66b 65.7B
#!/usr/bin/env bash
# The entry point of AWS Batch job. This script is in charge of configuring
# the repo, executing the given command, and uploading the results.
set -e
date
# Parse arguments
SOURCE_REF=$1
REPO=$2
@comaniac
comaniac / launch-github-runner.sh
Created February 10, 2022 01:24
Register and launch a GitHub Actions runner for an organization, and remove it on exit.
#!/usr/bin/env bash
set -e
RUNNER_VERSION="2.287.1"
# The path to the new runner.
RUNNER_PATH=$1
# The target Github org.
GITHUB_ORG=$2
# Optional runner label.
import numpy as np
import tvm
from tvm import relay, auto_scheduler
import tvm.relay.testing
from tvm.contrib import graph_executor
def get_network(name, batch_size, layout="NHWC", dtype="float32"):
"""Get the symbol definition and random weight of a network"""
import numpy as np
import tvm
from tvm import te, tir, topi
from tvm.topi import utils
dev = tvm.device("gpu", 0)
target = tvm.target.Target("cuda")
### Copy from topi/cuda/injective.py and make block/thread num configurable
home/ubuntu/meta/src/op/dispatch/tvmjit/unary.cc:55: Error: Failed to JIT mnm_op_erf: RuntimeError:
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
#include <cuda_fp16.h>
// Device-side max for half precision: returns the larger of a and b.
// __hgt performs a half-precision (a > b) comparison; when true, a is the max.
// NOTE(review): only compiled under __CUDA_ARCH__ >= 530 (see guard above),
// since native fp16 comparison intrinsics require compute capability 5.3+.
__device__ half max(half a, half b)
{
return __hgt(__half(a), __half(b)) ? a : b;
}
__device__ half min(half a, half b)
{
return __hlt(__half(a), __half(b)) ? a : b;
# Pytorch reference
# https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#train-the-network
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import tvm
import timeit
import numpy as np
import torch
import tvm
from tvm import auto_scheduler
import mnm
from mnm.testing.utils import ir_fusion, ir_simplify, get_vm_executor, get_vm_profiler