from transformers import pipeline

classifier = pipeline("text-classification", model="hadifar/xxx")

try:
    while True:
        user_input = input("Enter a string (Press Ctrl+C to stop): ")
        if user_input:
            # Returns a list of {'label': ..., 'score': ...} dicts, one per input
            res = classifier([user_input])
            print(res)
except KeyboardInterrupt:
    print("Stopped.")
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the locally saved model and push it to the Hugging Face Hub
model_name = 'model/classifier'
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.push_to_hub('hadifar/xxx')
tokenizer.push_to_hub('hadifar/xxx')
@hadifar
hadifar / classifier_1.py
Created August 19, 2023 11:26
simple classifier code
import random

import evaluate
import numpy as np
import torch
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          DataCollatorWithPadding, Trainer, TrainingArguments)
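The preview stops at the imports. A minimal sketch of how they typically fit together with the Trainer API; the dataset (GLUE SST-2), checkpoint (distilbert-base-uncased), and hyperparameters are assumptions, not from the gist:

dataset = load_dataset("glue", "sst2")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

def tokenize_fn(batch):
    return tokenizer(batch["sentence"], truncation=True)

tokenized = dataset.map(tokenize_fn, batched=True)
collator = DataCollatorWithPadding(tokenizer=tokenizer)

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return accuracy.compute(predictions=np.argmax(logits, axis=-1), references=labels)

model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="out", per_device_train_batch_size=16, num_train_epochs=1),
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["validation"],
    data_collator=collator,
    compute_metrics=compute_metrics,
)
trainer.train()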
@hadifar
hadifar / classical_baseline.py
Created April 25, 2022 15:01
A simple example for SVM
import argparse
import os
import numpy as np
from joblib import dump, load
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
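Only the imports survive in the preview. A minimal sketch of the TF-IDF + SVM baseline they suggest; the toy data and the linear kernel are assumptions, not from the gist:

texts = ["a great movie", "terrible plot", "loved the acting", "boring and slow"]
labels = [1, 0, 1, 0]

# Chain TF-IDF features into an SVM classifier
clf = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("svm", SVC(kernel="linear")),
])
clf.fit(texts, labels)
print(accuracy_score(labels, clf.predict(texts)))

# joblib's dump/load (imported above) persists the fitted pipeline
dump(clf, "svm_baseline.joblib")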
@hadifar
hadifar / parse.py
Last active November 20, 2021 11:49
I copied the code from this repo: https://github.com/iamrkg31/sentence-to-clauses
import re
import nltk

def get_verb_phrases(t):
    verb_phrases = []
    num_children = len(t)
    # Count how many direct children of t are verb phrases (VP)
    num_VP = sum(1 if t[i].label() == "VP" else 0 for i in range(0, num_children))
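The preview cuts off inside get_verb_phrases. As a rough usage sketch (the example sentence and its bracketed parse are made up, not from the gist), the function operates on an nltk.Tree such as one built from a bracketed parse string:

t = nltk.Tree.fromstring("(S (NP (PRP He)) (VP (VBZ eats) (NP (NN rice))))")
num_VP = sum(1 if t[i].label() == "VP" else 0 for i in range(len(t)))
print(num_VP)  # 1: the S node has one direct VP child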
import sys
import logging

import torch
from transformers import *

# Report GPU availability before loading any models
print('cuda available? ', torch.cuda.is_available())
print('how many gpus?', torch.cuda.device_count())

# Reset the root logger and send INFO-level messages to stdout
logging.root.handlers = []
logging.basicConfig(level="INFO", format='%(asctime)s:%(levelname)s: %(message)s', stream=sys.stdout)
import numpy as np
import tensorflow as tf

tf.enable_eager_execution()  # TF 1.x: opt in to eager mode

# Yearly observations: years and their corresponding values
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

# Min-max normalize both variables to [0, 1]
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())
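The preview ends after normalization. A minimal sketch of the gradient-descent fit that typically follows in this TF 1.x eager style; the learning rate and step count are assumptions:

# Trainable slope and intercept for y = a*X + b
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer())
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer())
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
for step in range(10000):
    with tf.GradientTape() as tape:
        y_pred = a * X + b
        loss = tf.reduce_sum(tf.square(y_pred - y))
    grads = tape.gradient(loss, [a, b])
    optimizer.apply_gradients(zip(grads, [a, b]))
print(a.numpy(), b.numpy())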
import tensorflow as tf

# Hyperparameters
vocabulary_size = 10000
embedding_size = 64
rnn_size = 64
batch_size = 512

# Download the IMDB sentiment dataset, keeping only the 10k most frequent words
(train_data, train_labels), (test_data, test_labels) = tf.keras.datasets.imdb.load_data(num_words=vocabulary_size)
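The rest of the gist is not shown. A plausible sketch of the RNN classifier these hyperparameters suggest; the LSTM cell, sequence length, and epoch count are assumptions:

# Pad the variable-length reviews to a fixed length
train_x = tf.keras.preprocessing.sequence.pad_sequences(train_data, maxlen=256)

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocabulary_size, embedding_size),
    tf.keras.layers.LSTM(rnn_size),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_x, train_labels, batch_size=batch_size, epochs=3)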
import tensorflow as tf

tf.enable_eager_execution()

# Toy data: two 2-d inputs and their scalar targets
X = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([[1.], [2.]])

# Trainable parameters of a linear model y = Xw + b
w = tf.get_variable(name='w', shape=[2, 1], initializer=tf.constant_initializer([[1.], [2.]]))
b = tf.get_variable(name='b', shape=[1], initializer=tf.constant_initializer([[1.]]))
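The snippet stops after declaring the variables. A minimal sketch of the forward pass and gradient computation that usually comes next; the squared-error loss is an assumption:

with tf.GradientTape() as tape:
    # Forward pass and squared-error loss
    L = 0.5 * tf.reduce_sum(tf.square(tf.matmul(X, w) + b - y))
w_grad, b_grad = tape.gradient(L, [w, b])
print(L.numpy(), w_grad.numpy(), b_grad.numpy())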
import tensorflow as tf

tf.enable_eager_execution()

x = tf.get_variable(initializer=3.0, name='x')

# Record operations on x so the tape can differentiate through them
with tf.GradientTape() as tape:
    y = tf.square(x)

y_grad = tape.gradient(y, x)  # dy/dx = 2x = 6.0 at x = 3.0