# Usecase
def make_prompt(input_text: str, system_prompt="", max_length=512) -> str:
    """
    Builds the prompt string passed to a large language model.

    Args:
        input_text (str): The input text for prompting.
        system_prompt (str): The system prompt (not used in the function).
        max_length (int): The maximum length of the generated text.
    """
    # The gist preview cuts off here; a minimal placeholder body
    # (system_prompt and max_length are consumed later, at generation time):
    return input_text
import random
import textwrap
import torch  # torch is needed for the CUDA availability check below

device = 'cuda' if torch.cuda.is_available() else 'cpu'
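The preview stops after the device check; a hedged sketch of how such a prompt and device are typically wired into generation (the `model` and `tokenizer` names here are hypothetical, not from the gist):

# Hypothetical usage; assumes a `model` and `tokenizer` loaded elsewhere.
inputs = tokenizer(make_prompt("Explain LoRA in one paragraph."), return_tensors="pt").to(device)
output_ids = model.generate(**inputs, max_length=512)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))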
import textwrap

def text_wrapper(text, width=90):
    # Wraps the input text to the specified width
    return textwrap.fill(text, width=width)
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
)
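The gist preview ends at the imports; a minimal sketch of how these pieces usually fit together to load a model in 4-bit for LoRA fine-tuning (the model name and quantization settings below are illustrative assumptions, not from the gist):

model_name = "meta-llama/Llama-2-7b-hf"  # assumption: any causal-LM checkpoint works here

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)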
import argparse
import bitsandbytes as bnb
from datasets import load_dataset
from functools import partial
import os
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, AutoPeftModelForCausalLM
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed, Trainer, TrainingArguments, \
    BitsAndBytesConfig, DataCollatorForLanguageModeling
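These imports suggest the k-bit LoRA recipe; a hedged sketch of the usual next step, assuming a 4-bit `model` loaded as in the previous snippet (the rank, alpha, and target_modules values are assumptions for illustration):

model = prepare_model_for_kbit_training(model)  # re-enables grads where needed for k-bit training
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # assumption: Llama-style attention projections
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()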
import torch
from tqdm import tqdm

def train_one_epoch(model, optimizer, scheduler, dataloader, device, epoch):
    model.train()
    dataset_size = 0
    running_loss = 0.0
    # tqdm's `total` argument sets the total number of progress-bar updates;
    # len(dataloader) is the number of batches in the dataloader.
    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        ids = data['input_ids'].to(device, dtype=torch.long)
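        # The gist preview truncates here; a sketch of how the rest of such an
        # epoch loop typically goes (assumes the batch also carries
        # `attention_mask` and `labels`, and that the model returns a loss --
        # none of which is shown in the gist):
        mask = data['attention_mask'].to(device, dtype=torch.long)
        labels = data['labels'].to(device, dtype=torch.long)

        loss = model(input_ids=ids, attention_mask=mask, labels=labels).loss
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

        # Track a running average of the loss, weighted by batch size
        batch_size = ids.size(0)
        running_loss += loss.item() * batch_size
        dataset_size += batch_size
        bar.set_postfix(Epoch=epoch, Train_Loss=running_loss / dataset_size)

    return running_loss / dataset_size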
import tensorflow as tf
from tensorflow import keras

class ChildDense(keras.layers.Layer):
    def __init__(self, units, activation=None):
        super().__init__()
        self.units = units
        self.activation = activation
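    # The preview ends here; a hedged sketch of the build/call pair such a
    # subclassed layer usually needs (the initializers are assumptions, and
    # `activation` is assumed to be a callable, not a string name):
    def build(self, input_shape):
        # Create weights lazily, once the input feature size is known
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer="glorot_uniform")
        self.b = self.add_weight(shape=(self.units,), initializer="zeros")

    def call(self, inputs):
        y = tf.matmul(inputs, self.w) + self.b
        if self.activation is not None:
            y = self.activation(y)
        return y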
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error, mean_absolute_error
import math

# Test for stationarity
def test_stationarity(timeseries):
    # Determine rolling statistics
    rolling_mean = timeseries.rolling(12).mean()
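    # Truncated in the preview; the usual continuation, sketched under the
    # assumption of a 12-period (e.g. monthly) window:
    rolling_std = timeseries.rolling(12).std()

    # Augmented Dickey-Fuller test; a p-value below 0.05 suggests the series
    # is stationary (the null hypothesis is a unit root)
    result = adfuller(timeseries.dropna(), autolag='AIC')
    print(f'ADF statistic: {result[0]:.4f}')
    print(f'p-value: {result[1]:.4f}')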
# Now generate TPR, FPR and ROC data
tpr, fpr, roc_auc = ([None] * number_of_classes for _ in range(3))
f, ax = plt.subplots()
for i in range(number_of_classes):
    fpr[i], tpr[i], _ = roc_curve(labels == i, predictions[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
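The preview stops before anything is drawn; a sketch of plotting the per-class curves on the `ax` created above (labels and styling are illustrative):

for i in range(number_of_classes):
    ax.plot(fpr[i], tpr[i], label=f'class {i} (AUC = {roc_auc[i]:.3f})')
ax.plot([0, 1], [0, 1], linestyle='--', color='gray')  # chance diagonal
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.legend()
plt.show()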
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve, auc
import matplotlib.pyplot as plt

# Generating synthetic data
number_of_classes = 5
samples_per_class = 70
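The preview cuts off before the data is actually built; one hedged way to synthesize `labels` and per-class `predictions` compatible with the ROC snippet above (the noise model here is an assumption, not from the gist):

rng = np.random.default_rng(42)
labels = np.repeat(np.arange(number_of_classes), samples_per_class)
# Noisy logits biased toward the true class, softmaxed into probabilities
logits = rng.normal(size=(labels.size, number_of_classes)) + 2.0 * np.eye(number_of_classes)[labels]
predictions = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)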
import random
import warnings

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from tqdm import tqdm
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
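The gist preview ends at the imports; a minimal sketch of the KNN workflow they imply (dataset sizes and `n_neighbors` below are illustrative assumptions):

warnings.filterwarnings('ignore')

X, y = make_classification(n_samples=500, n_features=2, n_informative=2,
                           n_redundant=0, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print('accuracy:', accuracy_score(y_test, knn.predict(X_test)))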