Skip to content

Instantly share code, notes, and snippets.

View tiandiao123's full-sized avatar

Cuiqing Li (李崔卿) tiandiao123

  • Shanghai, China
View GitHub Profile
@tiandiao123
tiandiao123 / optimize_bert.py
Created July 1, 2021 22:01 — forked from icemelon/optimize_bert.py
Optimize the BERT model on CPUs
import time
import argparse
import numpy as np
import mxnet as mx
import gluonnlp as nlp
import tvm
from tvm import relay
import tvm.contrib.graph_runtime as runtime
def timer(thunk, repeat=1, number=10, dryrun=3, min_repeat_ms=1000):
@icemelon
icemelon / optimize_bert.py
Last active December 29, 2022 04:09
Optimize the BERT model on CPUs
import time
import argparse
import numpy as np
import mxnet as mx
import gluonnlp as nlp
import tvm
from tvm import relay
import tvm.contrib.graph_runtime as runtime
def timer(thunk, repeat=1, number=10, dryrun=3, min_repeat_ms=1000):
def numeric_score(prediction, groundtruth):
    """Return binary confusion-matrix counts for a predicted mask.

    Parameters
    ----------
    prediction, groundtruth : array-like of {0, 1}
        Binary arrays of identical shape (element-wise comparison).

    Returns
    -------
    tuple of float
        (FP, FN, TP, TN): false positives, false negatives,
        true positives, true negatives.
    """
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float() is the documented replacement.
    FP = float(np.sum((prediction == 1) & (groundtruth == 0)))
    FN = float(np.sum((prediction == 0) & (groundtruth == 1)))
    TP = float(np.sum((prediction == 1) & (groundtruth == 1)))
    TN = float(np.sum((prediction == 0) & (groundtruth == 0)))
    return FP, FN, TP, TN
def accuracy(prediction, groundtruth):
    """Compute overall accuracy from binary confusion-matrix counts.

    NOTE(review): the body appears truncated in this scrape — ``N`` (the
    total element count) is computed but no return statement is visible.
    Presumably the missing tail returns ``(TP + TN) / N`` (possibly * 100);
    confirm against the original source.
    """
    FP, FN, TP, TN = numeric_score(prediction, groundtruth)
    N = FP + FN + TP + TN
# --- Script-level training setup ---
# NOTE(review): `mt_models` and `optim` are not imported in this visible
# chunk — presumably medicaltorch.models and torch.optim; confirm against
# the full file.
model = mt_models.Unet(drop_rate=0.4, bn_momentum=0.1)
model.cuda()  # move model parameters to the GPU (requires CUDA)
num_epochs = 10
initial_lr = 0.001
optimizer = optim.Adam(model.parameters(), lr=initial_lr)
# Cosine-annealing schedule decays the LR over the full num_epochs run.
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)