This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Collapse rare category values (fewer than RARE_THRESHOLD occurrences) into a
# single '<col>_rare' bucket, to keep the later one-hot encoding tractable.
RARE_THRESHOLD = 40  # minimum occurrences for a value to keep its own level (was hard-coded)
for col in categories:
    counts = dfX[col].value_counts()  # frequency of each distinct value in the column
    # Vectorized selection of the values below the threshold.
    rare_values = counts[counts < RARE_THRESHOLD].index.tolist()
    if len(rare_values) > 0:
        print('Trim values : ', col, len(rare_values))
        dfX.loc[dfX[col].isin(rare_values), col] = col + '_rare'
# Output :
# Trim values : funder 1730
# Trim values : installer 1982
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Give string-typed columns an explicit 'MISSING' level instead of NaN,
# so missingness itself becomes a category the models can use.
for col, col_type in nullcols:
    if col_type is str:
        dfX[col] = dfX[col].fillna('MISSING')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Convert boolean columns to string labels ('TRUE'/'FALSE') and give nulls an
# explicit 'MISSING' level, so these columns can be one-hot encoded like the rest.
for col, col_type in nullcols:
    if col_type is bool:
        # Compute both masks up front; True/False positions are disjoint, so
        # this matches the original sequential assignment exactly.
        true_mask = dfX[col] == True
        false_mask = dfX[col] == False
        dfX.loc[true_mask, col] = 'TRUE'
        dfX.loc[false_mask, col] = 'FALSE'
        dfX.loc[dfX[col].isnull(), col] = 'MISSING'
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Find columns with Null values and record, for each, the Python type of its
# first non-null entry — used later to decide the fill strategy per column.
nullcols = []
for col in dfX.columns:
    nbnull = dfX[col].isnull().sum()  # count of nulls (bool sum is already an int; '*1' was redundant)
    if nbnull > 0:
        # Type of the first non-null value is taken as the column's value type.
        tp = type(dfX[dfX[col].notnull()][col].iat[0])
        nullcols.append([col, tp])
        print(col, nbnull, tp)  # BUG FIX: was 'print(col, nbnull, t)' — undefined name 't' raised NameError
# Output
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from sklearn.ensemble import RandomForestClassifier

# Shuffle the dataset before slicing so the train/test split is random.
dfRFC = dfOHE.sample(frac=1)
dfRFC_trn = dfRFC[:45000]  # training set (first 45000 shuffled rows)
dfRFC_tst = dfRFC[45000:]  # testing set (remainder)

# Forest of 20 trees, each limited to depth 25.
RFC = RandomForestClassifier(n_estimators=20, max_depth=25)
RFC.fit(dfRFC_trn[predictors].values, dfRFC_trn['status_group_enc'].values)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from sklearn.linear_model import LogisticRegression

# Shuffle the dataset before slicing so the train/test split is random.
dfLR = dfOHE.sample(frac=1)
dfLR_trn = dfLR[:45000]  # training set (first 45000 shuffled rows)
dfLR_tst = dfLR[45000:]  # testing set (remainder)

# 'ovr' = one-vs-rest: one binary classifier trained per class.
LR = LogisticRegression(multi_class='ovr')
LR.fit(dfLR_trn[predictors].values, dfLR_trn['status_group_enc'].values)
# model accuracy score between 0% and 100%
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# One-hot encode each categorical column: drop the original column and append
# its dummy (indicator) columns in its place, accumulating into dfOHE.
dfOHE = None
for col in categories:  # encode 1 category at a time
    one_hot = pd.get_dummies(df[col], prefix=col)  # indicator columns named '<col>_<value>'
    base = df if dfOHE is None else dfOHE  # first pass starts from the raw frame
    # BUG FIX: the dummies were computed but never added back, so dfOHE ended up
    # with the categorical columns dropped and no encoded columns at all.
    dfOHE = base.drop(col, axis=1).join(one_hot)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import pandas as pd

# Load the training data: features and labels ship as two separate CSV files.
dfX = pd.read_csv('PUMP_training_set_values.csv')  # predictive variables (features)
dfY = pd.read_csv('PUMP_training_set_labels.csv')  # target variable (labels)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Split the date_recorded stamp (a 'YYYY-MM-DD' string) into numeric year and
# month features. (The original comment also mentions day-of-month / day-of-week,
# but only year and month are actually extracted here.)
from dateutil import parser  # NOTE(review): unused in this snippet — presumably needed elsewhere; verify before removing

dfX['date_recorded_year'] = dfX['date_recorded'].apply(lambda stamp: int(stamp.split('-')[0]))
dates.append('date_recorded_year')
dfX['date_recorded_month'] = dfX['date_recorded'].apply(lambda stamp: int(stamp.split('-')[1]))
dates.append('date_recorded_month')
# WARNING : probably not usefull for this dataset