Ben Weber (bgweber)
library(boot)
data <- read.csv("UserSessions.csv")

# Function for computing the difference of differences
run_DiD <- function(data, indices) {
  d <- data[indices, ]
  new <- mean(d$postval[d$expgroup == 'Test']) / mean(d$priorval[d$expgroup == 'Test'])
  old <- mean(d$postval[d$expgroup == 'Control']) / mean(d$priorval[d$expgroup == 'Control'])
  return((new - old) / old * 100.0)
}
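With the statistic defined, bootstrapping the difference-in-differences estimate is a single call; a minimal sketch, assuming UserSessions.csv provides priorval, postval, and expgroup columns:

# run 1,000 bootstrap replicates and compute a percentile confidence interval
results <- boot(data = data, statistic = run_DiD, R = 1000)
boot.ci(results, type = "perc")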
library(CausalImpact)
data <- read.csv(file = "DailySessions.csv")
# Combine the test and control series and plot the input data
ts <- cbind(data$test, data$control)
matplot(ts, type = "l")
# Use two-week prior and post periods and plot results
pre.period <- c(1, 14)
post.period <- c(15, 30)
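The snippet stops before the model is actually run; a minimal completion, assuming the first column of ts is the series affected by the intervention:

# fit the Bayesian structural time-series model and report the estimated impact
impact <- CausalImpact(ts, pre.period, post.period)
plot(impact)
summary(impact)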
bgweber / pandasUDF.py
Distributing Feature Generation with Pandas UDFs
import featuretools as ft
from pyspark.sql.functions import pandas_udf, PandasUDFType

# schema is the Spark schema of the returned feature DataFrame,
# defined before the decorator is applied
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def apply_feature_generation(pandasInputDF):
    # create Entity Set representation
    es = ft.EntitySet(id="events")
    es = es.entity_from_dataframe(entity_id="events", dataframe=pandasInputDF)
    es = es.normalize_entity(base_entity_id="events", new_entity_id="users", index="user_id")
    # generate features per user and return a Pandas DataFrame matching schema
    # (the dfs settings here are illustrative)
    feature_matrix, _ = ft.dfs(entityset=es, target_entity="users", max_depth=2)
    return feature_matrix.reset_index()
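The grouped-map UDF is applied with Spark's groupby().apply(); a minimal usage sketch, where sparkInputDF and the partition_id key are assumptions about the surrounding pipeline:

# distribute feature generation across the cluster, one group per task
featuresDF = sparkInputDF.groupby("partition_id").apply(apply_feature_generation)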
bgweber / feature_generation.py
Generating Features for Raw Event Data
import featuretools as ft
rawEventsDF = ... # load from data warehouse

# 1-hot encode the raw event data
es = ft.EntitySet(id="events")
es = es.entity_from_dataframe(entity_id="events", dataframe=rawEventsDF)
feature_matrix, defs = ft.dfs(entityset=es, target_entity="events", max_depth=1)
encodedDF, encoders = ft.encode_features(feature_matrix, defs)
# create feature encodings for the event and description fields
# (plays_df is assumed to be loaded earlier)
es = ft.EntitySet(id="plays")
es = es.entity_from_dataframe(entity_id="plays", dataframe=plays_df, index="play_id",
                              variable_types={"event": ft.variable_types.Categorical,
                                              "description": ft.variable_types.Categorical})
f1 = ft.Feature(es["plays"]["event"])
f2 = ft.Feature(es["plays"]["description"])
encoded, _ = ft.encode_features(plays_df, [f1, f2], top_n=10)
import numpy as np
import pandas as pd
# load the Boston housing data set
from sklearn.datasets import load_boston
boston = load_boston()

# convert to a Pandas DataFrame and shuffle the rows
boston_pd = pd.DataFrame(data=np.c_[boston['data'], boston['target']],
                         columns=np.append(boston['feature_names'], 'target')).sample(frac=1)
from sklearn.linear_model import LinearRegression
from scipy.stats import pearsonr

# split into data and label arrays
y = boston_pd['target']
X = boston_pd.drop(['target'], axis=1)

# create training (~80%) and test data sets for both features and labels
X_train, X_test = X[:400], X[400:]
y_train, y_test = y[:400], y[400:]
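The imports above point at the intended next step; a short sketch fitting scikit-learn's LinearRegression and scoring the held-out rows with pearsonr:

# fit on the training set and correlate predictions with the actual labels
lr = LinearRegression()
model = lr.fit(X_train, y_train)
y_pred = model.predict(X_test)
r, _ = pearsonr(y_pred, y_test)
print("Pearson r:", r)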
from pyspark.ml.feature import VectorAssembler
# convert to a Spark data frame
boston_sp = spark.createDataFrame(boston_pd)
display(boston_sp.take(5))
# split into training and test Spark data frames
boston_train = spark.createDataFrame(boston_pd[:400])
boston_test = spark.createDataFrame(boston_pd[400:])
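Spark ML's LinearRegression expects the features packed into a single vector column, which is presumably why VectorAssembler is imported above; a hedged sketch assembling one:

# pack the raw feature columns into the single "features" vector column
feature_cols = [c for c in boston_pd.columns if c != 'target']
assembler = VectorAssembler(inputCols=feature_cols, outputCol="features")
boston_train = assembler.transform(boston_train)
boston_test = assembler.transform(boston_test)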
# linear regression with Spark ML
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(maxIter=10, regParam=0.1,
                      elasticNetParam=0.5, labelCol="target")

# fit the model and score the test set
model = lr.fit(boston_train)
boston_pred = model.transform(boston_test)
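To mirror the Pearson check on the scikit-learn side, the Spark predictions can be scored with RegressionEvaluator; a minimal sketch:

from pyspark.ml.evaluation import RegressionEvaluator
# R^2 between the "prediction" column and the label
r2 = RegressionEvaluator(labelCol="target", metricName="r2").evaluate(boston_pred)
print("r2 on the test set:", r2)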
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.evaluation import RegressionEvaluator

# grid search over the elastic net mixing parameter with 10-fold cross validation
lr = LinearRegression(labelCol="target")
crossval = CrossValidator(estimator=lr,
                          estimatorParamMaps=ParamGridBuilder().addGrid(
                              lr.elasticNetParam, [0.0, 0.5, 1.0]).build(),
                          evaluator=RegressionEvaluator(
                              labelCol="target", metricName="r2"),
                          numFolds=10)
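Fitting the cross-validator selects the best parameter setting and returns a model that can score the test set; a short sketch:

# fit with cross validation, then predict with the best model found
cvModel = crossval.fit(boston_train)
boston_pred = cvModel.transform(boston_test)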