This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from bayes_opt import BayesianOptimization | |
from sklearn.cross_validation import KFold | |
import xgboost as xgb | |
def xgbCv(train, features, numRounds, eta, gamma, maxDepth, minChildWeight, subsample, colSample): | |
# prepare xgb parameters | |
params = { | |
"objective": "reg:linear", | |
"booster" : "gbtree", | |
"eval_metric": "mae", |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def plot_auc_pr(y_test, y_pred): | |
fig = plt.figure(figsize=(20,7)) | |
plt.subplot(1,2,1) | |
fpr, tpr, _ = roc_curve(y_test, y_pred) | |
auc = roc_auc_score(y_test, y_pred) | |
plt.plot(fpr,tpr,label="auc="+str(auc)) | |
plt.title('roc-auc curve') | |
plt.legend(loc=4) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
imp = pd.DataFrame({'feats':predictors, | |
'ranks': clf.feature_importances_}).sort_values('ranks', ascending=False) | |
fig, ax = plt.subplots(figsize=(12, 15)) | |
# Example data | |
features = np.arange(len(imp.feats)) | |
ranking = imp.ranks | |
ax.barh(features, ranking, align='center', color='skyblue', ecolor='black') |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import pandas as pd; | |
import numpy as np; | |
import lightgbm as lgb | |
from bayes_opt import BayesianOptimization | |
from sklearn.model_selection import cross_val_score | |
def lgb_evaluate( | |
numLeaves, | |
maxDepth, | |
scaleWeight, |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from bayes_opt import BayesianOptimization | |
from sklearn.cross_validation import KFold | |
import xgboost as xgb | |
import numpy | |
def xgbCv(train, features, numRounds, eta, gamma, maxDepth, minChildWeight, subsample, colSample): | |
# prepare xgb parameters | |
params = { | |
"objective": "binary:logistic", | |
"booster" : "gbtree", |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Model input columns — King County house-sales schema
# (presumably kc_house_data; TODO confirm against the loaded dataframe).
features = ['bedrooms', 'bathrooms', 'sqft_living',
            'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade',
            'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode',
            'sqft_living15', 'sqft_lot15']
# Render the pairwise Pearson correlation matrix of the feature columns as
# an annotated heatmap. NOTE(review): relies on df, plt and sns being
# defined/imported earlier in the notebook — not visible in this fragment.
f, ax = plt.subplots(figsize=(16, 12))
plt.title('Pearson Correlation Matrix',fontsize=25)
sns.heatmap(df[features].corr(), linewidths=0.25, vmax=1.0, square=True, cmap="BuGn_r", linecolor='k', annot=True)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{0: 'tench, Tinca tinca', | |
1: 'goldfish, Carassius auratus', | |
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', | |
3: 'tiger shark, Galeocerdo cuvieri', | |
4: 'hammerhead, hammerhead shark', | |
5: 'electric ray, crampfish, numbfish, torpedo', | |
6: 'stingray', | |
7: 'cock', | |
8: 'hen', | |
9: 'ostrich, Struthio camelus', |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def add_datepart(df, fldname, drop=True):
    """Expand the date column *fldname* of *df* into date-part feature columns, in place.

    Adds one column per date attribute (Year, Month, Week, Day, Dayofweek,
    Dayofyear and the Is_month/quarter/year_start/end flags) plus an
    'Elapsed' column of Unix epoch seconds. New column names are prefixed
    with *fldname* minus any trailing 'date'/'Date'.

    Parameters
    ----------
    df : pandas.DataFrame
        Mutated in place.
    fldname : str
        Name of the column; converted to datetime64 if it is not already.
    drop : bool, default True
        If True, drop the original column after expansion.

    Returns None; *df* is modified in place.
    """
    fld = df[fldname]
    if not np.issubdtype(fld.dtype, np.datetime64):
        # infer_datetime_format was dropped here: it is deprecated since
        # pandas 2.0 and strict format inference is the default behaviour.
        df[fldname] = fld = pd.to_datetime(fld)
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    for n in ('Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
              'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start',
              'Is_year_end', 'Is_year_start'):
        if n == 'Week':
            # Series.dt.week was removed in pandas 2.0; isocalendar().week is
            # the replacement (cast back to int64 from its UInt32 dtype).
            df[targ_pre+n] = fld.dt.isocalendar().week.astype(np.int64)
        else:
            df[targ_pre+n] = getattr(fld.dt, n.lower())
    # Unix timestamp in whole seconds (datetime64[ns] -> ns -> s).
    df[targ_pre+'Elapsed'] = fld.astype(np.int64) // 10**9
    if drop: df.drop(fldname, axis=1, inplace=True)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import optuna
from collections import Counter
# optuna
# Derive column roles (predictor columns, target column, id/entity/time
# columns, ordering/grouping keys) from the training frame's column names.
# NOTE(review): build_predictors_naming_conventions and df_train are defined
# elsewhere and not visible in this fragment — verify the return order
# matches this 7-tuple unpacking.
predictors, target, key_id, entity, timestamp, order_by_entity, group_within_entity = build_predictors_naming_conventions(list(df_train.columns.values))
def get_pos_weight(train, label=target):
    """Return a scale_pos_weight value for an imbalanced binary label.

    Computes the negative/positive count ratio of ``train[label]``
    (class 0 over class 1), damps it by a 0.90 factor, and rounds the
    result to two decimal places.
    """
    class_counts = Counter(train[label])
    neg_over_pos = class_counts[0] / class_counts[1]
    return round(neg_over_pos * 0.90, 2)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
### Fuzzy matching between two arrays. | |
### for each item in array1, selects the most similar item from array2 | |
import re, math | |
from collections import Counter | |
WORD = re.compile(r'\w+') | |
def get_cosine(vec1, vec2): | |
intersection = set(vec1.keys()) & set(vec2.keys()) |
OlderNewer