François St-Amant (francoisstamant)

import numpy as np
import pandas as pd
import torch
import evaluate
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    DistilBertTokenizerFast,
    DistilBertForSequenceClassification,
    DataCollatorWithPadding,
    TrainingArguments,
    Trainer,
)
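The gist preview stops at the imports. A minimal sketch of how they are typically wired together into a Trainer run, assuming the IMDb dataset and distilbert-base-uncased as stand-ins (neither choice is confirmed by the gist):

dataset = load_dataset("imdb")                       # assumed dataset, not shown in the gist
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True)

tokenized = dataset.map(tokenize, batched=True)
collator = DataCollatorWithPadding(tokenizer=tokenizer)   # pads each batch dynamically
accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return accuracy.compute(predictions=np.argmax(logits, axis=-1), references=labels)

model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="out", num_train_epochs=2, per_device_train_batch_size=16),
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["test"],
    data_collator=collator,
    compute_metrics=compute_metrics,
)
trainer.train()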
knapsack_model.optimize()
#Print the optimal value of each decision variable
for i in range(len(my_list)):
    print('%s: %g' % (my_list[i], knapsack_model.getVars()[i].x))
#Define objective
obj_fn = sum((pleasure[i]*x[i] + (wellbeing[i] * x[i])*0.75) for i in range(N))
knapsack_model.setObjective(obj_fn, GRB.MAXIMIZE)
#The decision variables (time spent on each activity)
x = knapsack_model.addVars(N, vtype = GRB.INTEGER, name="x")
#Add constraints to the model
#The total cost must not exceed our budget
knapsack_model.addConstr(sum(cost_dollars[i]*x[i] for i in range(N)) <= total_budget)
#The minimum time for each activity must be met
knapsack_model.addConstrs(x[i] >= minimum_time[i] for i in range(N))
from gurobipy import *
#Initialize the problem
my_list = ['work','errands','gym', 'yoga', 'gaming', 'netflix', 'beer_friends', 'restaurant', 'golf', 'piano']
#Weekly values
cost_dollars = [1, 1, 1, 15, 5, 1, 15, 15, 30, 1]
pleasure = [4, 1, 1, 6, 8, 7, 10, 9, 9, 7]
wellbeing = [8, 9, 10, 9, 3, 3, 2, 3, 7, 7]
minimum_time = [35, 1, 3, 0, 0, 0, 0, 0, 0, 0]
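The initialization preview ends before the model object is created. A small sketch of the presumed setup, using the knapsack_model and N names from the other fragments (total_budget, referenced by the budget constraint, is not shown in any preview):

knapsack_model = Model('knapsack')   # Gurobi model that the other snippets build on
N = len(my_list)                     # number of activities to allocate time to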
import sys
#Append the folder that contains the reusable module (sys.path entries are folders, not .py files)
sys.path.append('LOCATION_OF_YOUR_FOLDER')
from Monte_Carlo_Simulation import Monte_Carlo
results = Monte_Carlo(iterations=1000, variables=['Salary','Location'],
                      weights=[0.6,0.4], grade=[[3,6],[8,8.5]])
import random

def Monte_Carlo(iterations, variables, weights, grade):
    final_results=[]
    for n in range(iterations):
        results=[]
        for i in range(len(variables)):
            #Weight a random draw from each variable's grade range
            value = weights[i] * (random.uniform(grade[i][0], grade[i][1]))
            results.append(value)
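        #Assumed completion (the gist preview cuts off above): combine the
        #weighted draws for this iteration and return all simulated scores
        final_results.append(sum(results))
    return final_results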
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
df = pd.read_csv('house_prices.csv', sep=';')
#One hot encoding
neighborhoods = pd.get_dummies(df.Neighborhood, prefix='In_')
houses = pd.concat([df,neighborhoods], axis=1)
houses = houses.drop(['Neighborhood','House_Id'], axis=1)
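The preview imports train_test_split but ends before the split itself. A minimal sketch, assuming a hypothetical Price column as the target (the actual column name is not shown in the preview):

X = houses.drop('Price', axis=1)   # 'Price' is a placeholder for the real target column
y = houses['Price']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)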
import pandas as pd
import gspread
from gspread_dataframe import set_with_dataframe
# Access the Google Sheet
gc = gspread.service_account(filename='location of your JSON file')
sh = gc.open_by_key('spreadsheetID')
worksheet = sh.worksheet('WorksheetName')
# Add data to sheet
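#Assumed completion (the preview ends here): write a pandas DataFrame df to the worksheet
set_with_dataframe(worksheet, df)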
outputs = learn.blurr_generate(text_to_generate, early_stopping=False, num_return_sequences=1)
for idx, o in enumerate(outputs):
    print(f'=== Prediction {idx+1} ===\n{o}\n')