from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
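# Start a local Spark session that uses all available cores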
spark = SparkSession.builder.master("local[*]").appName("MADS 2020").getOrCreate()
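# Read the semicolon-delimited machine log and let Spark infer column types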
data = spark.read.csv("data/machine_log.csv", inferSchema=True, header=True, sep=";")
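# Work with a 10% sample (fixed seed for reproducibility) to keep exploration fast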
data_sample = data.sample(fraction=0.1, seed=42)
# Some compounds have fewer produced units on average
compound_pro = data_sample.groupBy("COMPOUND_ID").avg("NUMBER_RUNS").sort("avg(NUMBER_RUNS)").toPandas()
compound_pro.plot.bar(x="COMPOUND_ID");
# Certain compounds produce more waste on average
compound_waste = data_sample.groupBy("COMPOUND_ID").avg("WASTE").sort("avg(WASTE)").toPandas()
compound_waste.plot.bar(x="COMPOUND_ID");
# Correlation between production volume and waste
compound_pro_waste = pd.concat([compound_pro.set_index("COMPOUND_ID"), compound_waste.set_index("COMPOUND_ID")], axis=1)
sns.regplot(data=compound_pro_waste, x="avg(NUMBER_RUNS)", y="avg(WASTE)");
compound_pro_waste.plot.scatter(x="avg(NUMBER_RUNS)", y="avg(WASTE)");
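# Optional numeric check (not in the original gist): Pearson correlation
# between average runs and average waste, complementing the plots above
compound_pro_waste["avg(NUMBER_RUNS)"].corr(compound_pro_waste["avg(WASTE)"])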