Skip to content

Instantly share code, notes, and snippets.


Christian Setzkorn csetzkorn

View GitHub Profile
csetzkorn / gist:7b134cd25ccf08c508aeb002ddf699a9
Last active Sep 5, 2018
Simple text classification example using Keras and word embeddings
View gist:7b134cd25ccf08c508aeb002ddf699a9
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
# see also:
csetzkorn / gist:0440cf16f27011609aceda8d052f7877
Created Aug 29, 2018
Monte Carlo integration — see DataCamp
View gist:0440cf16f27011609aceda8d052f7877
# Define the sim_integrate function
def sim_integrate(func, xmin, xmax, sims):
    """Estimate the integral of ``func`` over [xmin, xmax] by hit-or-miss Monte Carlo.

    Draws ``sims`` points uniformly inside the bounding box of the curve and
    scales the box area by the fraction of points that land under the curve.

    Parameters
    ----------
    func : callable
        Vectorized function of a 1-D numpy array.
    xmin, xmax : float
        Integration limits.
    sims : int
        Number of random points to draw.

    Returns
    -------
    float
        Monte Carlo estimate of the integral.
    """
    x = np.random.uniform(xmin, xmax, sims)
    fx = func(x)  # hoisted: the original evaluated func(x) twice
    # Vertical extent of the sampling box; the lower edge is clamped to 0 so
    # curves that stay positive are still boxed from the x-axis up.
    y_lo = min(np.min(fx), 0)
    y_hi = np.max(fx)
    y = np.random.uniform(y_lo, y_hi, sims)
    # BUG FIX: use the exact area of the box we sampled from. The original used
    # (max(y) - min(y)), the range of the *sampled* y values, which is always
    # <= the true box height and systematically underestimates the integral.
    area = (y_hi - y_lo) * (xmax - xmin)
    # Fraction of samples under the curve times the box area.
    result = area * np.sum(np.abs(y) < np.abs(fx)) / sims
    return result


# Call the sim_integrate function and print results
result = sim_integrate(func=lambda x: x * np.exp(x), xmin=0, xmax=1, sims=50)
csetzkorn / gist:f5bbe72d5225151730d60734be3dc4a6
Created Aug 29, 2018
jackknife estimate of median and CI
View gist:f5bbe72d5225151730d60734be3dc4a6
# Jackknife estimate of the median: leave one observation out at a time and
# record the median length of each leave-one-out sample.
# NOTE(review): this gist preview appears truncated — the loop body never
# computes or appends a median to `median_lengths`, and `wrench_lengths`,
# `index`, and `n` must be defined earlier (not shown). Indentation was lost
# in extraction.
# Leave one observation out to get the jackknife sample and store the median length
median_lengths = []
for i in range(n):
jk_sample = wrench_lengths[index != i]
median_lengths = np.array(median_lengths)
# Calculate jackknife estimate and its variance
jk_median_length = np.mean(median_lengths)
View gist:84cfb993328904e07c4fd0908552d38c
# Bootstrap the R-squared of an OLS fit: resample the rows of `df` with
# replacement `sims` times and refit on each resample.
# NOTE(review): fragment appears truncated — the refit inside the loop and
# the append to `rsquared_boot` are cut off; `sm` (presumably statsmodels)
# and `df` are defined elsewhere. Indentation was lost in extraction.
rsquared_boot, coefs_boot, sims = [], [], 1000
reg_fit = sm.OLS(df['y'], df.iloc[:,1:]).fit()
# Run 1K iterations
for i in range(sims):
# First create a bootstrap sample with replacement with n=df.shape[0]
bootstrap = df.sample(n=df.shape[0], replace=True)
# Fit the regression and append the r square to rsquared_boot
View gist:88fccf1fdb5bfc27bc406f52d763a631
# Read a tab-separated data file (with a header row) into a data frame.
# NOTE(review): the file path is an empty-string placeholder — fill it in
# before running. ("Orginal" is a pre-existing typo in the variable name,
# kept as-is for compatibility with the rest of the script.)
OrginalData <- read.table("",
header = TRUE, sep = "\t")
View gist:628621c87fd97fb018f14882a8497dbf
# Read the iris measurements from a tab-separated file and select a subset
# of columns.
# NOTE(review): fragment is truncated — the `select = c(` argument list is
# cut off mid-expression in this preview.
OrginalData <- read.table("IrisData.txt",
header = TRUE, sep = "\t")
SubsetData <- subset(OrginalData, select = c(
View gist:5b58794316c1983f85d39c29153034ec
import os
from scipy.misc import imread
from scipy.linalg import norm
from scipy import sum, average
# Compare two images after normalizing their exposure.
# NOTE(review): truncated in this preview — the function body continues past
# what is shown, and `normalize` is defined elsewhere. Also note that
# `scipy.misc.imread` (imported above) was removed in SciPy >= 1.2, and
# `from scipy import sum, average` shadows the `sum` builtin.
def compare_images(img1, img2):
# normalize to compensate for exposure difference, this may be unnecessary
# consider disabling it
img1 = normalize(img1)
View gist:843c2916364618b81e12b926e5b73c0f
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
View gist:9bb42a4a0dc26686d1a8991706acc11f
import pandas as pd
from fbprophet import Prophet
# NOTE(review): this line was mangled in extraction — the stray
# "'fivethirtyeight')" suffix indicates two fused statements: the pyplot
# import and a style selection.
import matplotlib.pyplot as plt
# Load the AirPassengers series and rename columns to the ds/y schema that
# Prophet requires.
# NOTE(review): truncated — the rename dict is cut off mid-literal; the
# missing second entry presumably maps the passenger-count column to 'y'.
# The CSV path is machine-specific (absolute Windows path).
df = pd.read_csv('D:/PyCharmProjects/Prophet/Data/AirPassengers.csv')
df['Month'] = pd.DatetimeIndex(df['Month'])
#Prophet also imposes the strict condition that the input columns be named ds (the time column) and y (the metric column)
df = df.rename(columns={'Month': 'ds',
csetzkorn / gist:8ab0c61b06107f10ed5bc542da47240a
Created May 21, 2017
Fit SOM, cluster prototypes and add cluster membership to original dataset
View gist:8ab0c61b06107f10ed5bc542da47240a
# Read the raw iris measurements from a tab-separated text file.
OrginalData <- read.table("IrisData.txt", header = TRUE, sep = "\t")

# Restrict the data to the four numeric measurement columns.
SubsetData <- subset(
  OrginalData,
  select = c("SepalLength", "SepalWidth", "PetalLength", "PetalWidth")
)
#TrainingMatrix <- as.matrix(scale(SubsetData))