Python machine learning recipes

Common Patterns

Imports

import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline

Read JSON

import json
with open('data.json') as f:
    data = json.load(f)
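
If data.json holds an array of records, pandas can read it straight into a DataFrame (a sketch, assuming that shape of file):

import pandas as pd
df = pd.read_json('data.json')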

Map a list

list(map(lambda x: x*x, [1,2,3,4]))  # map returns a lazy iterator in Python 3; list() materializes it
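
The same thing as a list comprehension, which is the more idiomatic form:

[x*x for x in [1,2,3,4]]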

Datasets

Train/Test Split

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
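
For classification, passing stratify=y preserves the class balance in both splits (a minor variation on the recipe above):

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=0, stratify=y)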

Classifiers

Comparing Classifiers

from matplotlib import pyplot
%matplotlib inline
from sklearn.model_selection import cross_val_score

from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

models = [
    ('LR', LogisticRegression()),
    ('LDA', LinearDiscriminantAnalysis()),
    ('AdaBoost', AdaBoostClassifier()),
    ('KNN', KNeighborsClassifier()),
    ('CART', DecisionTreeClassifier()),
    ('NB', GaussianNB()),
    ('SVM', SVC())]

results = []
names = []
for name, model in models:
    cv_results = cross_val_score(model, X, y, cv=10, n_jobs=-1, scoring='accuracy')
    results.append(cv_results)
    names.append(name)
    print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))

fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()

Confusion Matrix

from sklearn.metrics import confusion_matrix

y_pred = clf.predict(X_test)
confusion_matrix(y_test, y_pred)
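
To inspect it visually, a seaborn heatmap works well (a sketch, reusing y_test and y_pred from above):

import seaborn as sns
import matplotlib.pyplot as plt

cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='d')  # annotate each cell with its count
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()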

Pipelines

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from sklearn.preprocessing import PolynomialFeatures

clf = Pipeline([
        ('select_best', SelectKBest(score_func=chi2, k=100)),
        ('rfe', RFE(LogisticRegression(), n_features_to_select=60, verbose=1)),
        ('pca', PCA(n_components=20)),
        ('polynomial', PolynomialFeatures(2)),
        ('classify', LogisticRegression())])
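
The assembled pipeline behaves like a single estimator (a sketch, assuming the X_train/X_test split from above):

clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))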

Visualization

Imports

import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline

Plots

Histogram Plot

plt.hist([train[ train['Survived'] == 0 ][feature], train[ train['Survived'] == 1 ][feature]])
plt.legend(["Died", "Survived"])

Swarm Plot

sns.swarmplot(x="Pclass", y="Male", hue="Survived", data=train)

Plot matrices

Scatter Matrix

from pandas.plotting import scatter_matrix
scatter_matrix(data)

Histogram Matrix

data.hist()

Density Matrix

data.plot(kind='density', subplots=True, layout=(3,3), sharex=False)

Correlation Matrix

correlations = data.corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
plt.show()
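
Seaborn offers a more compact alternative:

sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=True)
plt.show()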

Box Plot Matrix

data.plot(kind='box', subplots=True, layout=(3,3), sharex=False, sharey=False)

Pair Plot

sns.pairplot(train[['Survived', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare', 'Male']], hue='Survived')

Dimensionality Reduction

Feature Importances

from sklearn.ensemble import RandomForestRegressor
import numpy as np
import matplotlib.pyplot as plt

clf = RandomForestRegressor(n_estimators=1000, random_state=0, n_jobs=-1)
clf.fit(X, y)
importances = clf.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X.shape[1]):
    # index the column names through indices so names line up with the sorted importances
    print("%2d) %-*s %f" % (f + 1, 30, X.columns[indices[f]], importances[indices[f]]))

plt.figure(figsize=(15,8))
plt.title('Feature Importances')
plt.bar(range(X.shape[1]),
      importances[indices],
      color='lightblue',
      align='center')
plt.xticks(range(X.shape[1]), X.columns[indices], rotation=90)
plt.show()
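
To drop low-importance features automatically, SelectFromModel can wrap the fitted forest (a sketch; threshold='median' keeps the top half):

from sklearn.feature_selection import SelectFromModel

selector = SelectFromModel(clf, threshold='median', prefit=True)
X_reduced = selector.transform(X)  # keeps only features above the median importance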

PCA

Elbow plots

from sklearn.decomposition import PCA

pca = PCA(n_components=50)
pca.fit(X_train)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.title("PCA elbow plot")
plt.xlabel("n_components")
plt.ylabel("cumulative explained variance ratio")
plt.show()
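
Once the elbow suggests a component count, refit at that size and transform (20 here is illustrative, not a recommendation):

pca = PCA(n_components=20)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)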