I hereby claim:
- I am moritzkoerber on github.
- I am moritzkoerber (https://keybase.io/moritzkoerber) on keybase.
- I have a public key ASBagXNuNawc5COk1wSUH57zvRWiy4bM8o7ZeCxKWVv06Ao
To claim this, I am signing this object:
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

titanic = pd.read_csv('./titanic.csv')
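The imports above set up a scikit-learn preprocessing and modelling pipeline for the Titanic data. Below is a minimal sketch of how they might be wired together; the column names (Age, Fare, Pclass, Sex, Embarked, Survived) follow the usual Kaggle Titanic file, and the hyperparameter grid is illustrative, not the original script's.

# Sketch only: column names and grid values are assumptions, building on the imports above.
numeric_features = ["Age", "Fare"]
categorical_features = ["Pclass", "Sex", "Embarked"]

preprocessor = ColumnTransformer(
    transformers=[
        # Impute missing numeric values with the median, then standardize
        ("num", Pipeline([
            ("imputer", SimpleImputer(strategy="median")),
            ("scaler", StandardScaler()),
        ]), numeric_features),
        # Impute missing categories with the most frequent value, then one-hot encode
        ("cat", Pipeline([
            ("imputer", SimpleImputer(strategy="most_frequent")),
            ("onehot", OneHotEncoder(handle_unknown="ignore")),
        ]), categorical_features),
    ]
)

pipeline = Pipeline([
    ("preprocessor", preprocessor),
    ("classifier", LogisticRegression(max_iter=1000)),
])

param_grid = {"classifier__C": [0.1, 1.0, 10.0]}
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42)

grid = GridSearchCV(pipeline, param_grid, cv=cv, scoring="f1")
X = titanic[numeric_features + categorical_features]
y = titanic["Survived"]
grid.fit(X, y)

# In-sample report for illustration only; a held-out split would be used in practice
print(grid.best_params_, grid.best_score_)
print(classification_report(y, grid.predict(X)))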
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, f1_score
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
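This second import block adds RandomForestClassifier, train_test_split, and f1_score, which suggests comparing two estimators inside one grid search and scoring on a held-out split. A minimal sketch of that pattern, assuming a preprocessed feature matrix X and binary target y are already defined; the grids and split parameters are illustrative:

# Sketch only: X and y are assumed inputs; grids and split settings are illustrative.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)

pipe = Pipeline([("classifier", LogisticRegression(max_iter=1000))])

# Each dict swaps a different estimator into the final step, with its own hyperparameters
param_grid = [
    {"classifier": [LogisticRegression(max_iter=1000)], "classifier__C": [0.1, 1.0, 10.0]},
    {"classifier": [RandomForestClassifier()], "classifier__n_estimators": [100, 300]},
]

cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42)
grid = GridSearchCV(pipe, param_grid, cv=cv, scoring="f1", n_jobs=-1)
grid.fit(X_train, y_train)

y_pred = grid.predict(X_test)
print("Test F1:", f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))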
import argparse
import pickle
import string
import sys

import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
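These imports point to an NLTK-based text-cleaning step. A minimal sketch of such a tokenizer follows; the function name, download calls, and exact cleaning steps are assumptions, not the original script's code.

# Sketch only: corpora downloads and cleaning steps are illustrative assumptions.
nltk.download("punkt", quiet=True)
nltk.download("stopwords", quiet=True)
nltk.download("wordnet", quiet=True)

lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))

def tokenize(text):
    """Lowercase, strip punctuation, drop stopwords, and lemmatize a raw string."""
    text = text.lower().translate(str.maketrans("", "", string.punctuation))
    tokens = word_tokenize(text)
    return [lemmatizer.lemmatize(tok) for tok in tokens if tok not in stop_words]

print(tokenize("The quick brown foxes are jumping over the lazy dogs."))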
import pandas as pd

df = pd.DataFrame(
    dict(
        week=[1, 1, 2, 2, 3, 3] * 2,
        layout=["classic", "classic", "modern", "modern"] * 3,
        response=["conversion", "exit"] * 6,
        cnt=[26, 23, 45, 34, 55, 44, 53, 27, 28, 25, 30, 34],
    )
)
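The frame holds weekly conversion and exit counts for two page layouts, which reads like A/B-test data. A plausible next step, assuming the question is whether conversion differs between layouts, is to collapse the weeks into a contingency table and run a chi-squared test; the snippet below is an illustration, not the original analysis.

# Sketch only: the chi-squared analysis is an assumed next step.
from scipy.stats import chi2_contingency

# Collapse weeks into a layout x response table of total counts
table = df.pivot_table(index="layout", columns="response", values="cnt", aggfunc="sum")
print(table)

# Test whether the conversion/exit split is independent of the layout
chi2, p_value, dof, expected = chi2_contingency(table)
print(f"chi2 = {chi2:.2f}, p = {p_value:.3f}")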