Skip to content

Instantly share code, notes, and snippets.

@joaopcnogueira
Last active July 11, 2019 14:55
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save joaopcnogueira/d25a76bafb9c46baa5221ecc203c151d to your computer and use it in GitHub Desktop.
K-fold cross-validation with pipeline
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from category_encoders import OneHotEncoder
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate
# Titanic survival: evaluate a depth-3 decision tree with 5-fold
# cross-validation, with preprocessing (one-hot encoding + mean
# imputation) wrapped in a Pipeline so each fold fits its transformers
# on that fold's training split only (no leakage across folds).

# Load the training dataset.
df = pd.read_csv("train.csv")

# Drop the name, ticket and cabin columns: free-text / high-cardinality
# fields this simple pipeline cannot use directly.
# (Reassignment instead of inplace=True — the pandas-recommended form.)
df = df.drop(columns=["Name", "Ticket", "Cabin"])

# Build the model as a pipeline: encode categoricals, impute missing
# values with the column mean, then fit the tree (fixed seed for
# reproducibility).
model = Pipeline(steps=[
    ('one-hot encoder', OneHotEncoder()),
    ('imputer', SimpleImputer(strategy='mean')),
    ('tree', DecisionTreeClassifier(max_depth=3, random_state=0)),
])

# Features and target; "Survived" is the label column.
X = df.drop(columns=["Survived"])
y = df["Survived"]

# Validate the model with shuffled 5-fold cross-validation.
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
results = cross_validate(model, X=X, y=y, cv=kfold)

# test_score is the default scorer per fold (accuracy for classifiers).
scores = results['test_score']
print(f"Average accuracy: {scores.mean():f} ({scores.std():f})")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment