from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
import tensorflow as tf

# Load a pretrained checkpoint and its matching tokenizer
model_name = 'bert-base-cased'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TFAutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# A tiny toy dataset: one positive and one negative example
texts = ["I'm a positive example!", "I'm a negative example!"]
labels = [1, 0]

# Tokenize, padding every sample to the length of the longest one
processed_text = tokenizer(texts, padding='longest', return_tensors='tf')
labels = tf.convert_to_tensor(labels)

# Transformers fine-tune best with low learning rates (around 1e-5 to 1e-4)
opt = tf.keras.optimizers.Adam(learning_rate=5e-5)
# The model outputs raw logits, so the loss must apply the softmax itself
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

model.compile(optimizer=opt, loss=loss)
model.fit(dict(processed_text), labels, epochs=3)
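
# After fitting, a quick sanity check is to run the model on fresh text and
# softmax the logits into probabilities. This is a minimal sketch; the test
# sentence below is made up for illustration.
test_inputs = tokenizer(["What a wonderful day!"], return_tensors='tf')
logits = model(dict(test_inputs)).logits
probs = tf.nn.softmax(logits, axis=-1)
predicted_class = tf.argmax(probs, axis=-1).numpy()
print(probs.numpy(), predicted_class)  # class 1 = positive, class 0 = negative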