Skip to content

Instantly share code, notes, and snippets.

View flxw's full-sized avatar

Felix Wolff flxw

View GitHub Profile
// One-off Mongo shell migration: give every document in `exercises`
// an empty `revisions` array.
//
// Fix: the original declaration list was missing a comma after
// `exercises`, so automatic semicolon insertion terminated the `var`
// statement and `db` was assigned as an undeclared (implicit global)
// binding. All three names are now declared in a single `var` chain.
var conn = new Mongo(),
    db = conn.getDB("educatopiadev"),
    exercises = db.exercises.find();

exercises.forEach(function (exercise) {
  // Initialize (or reset) the revision history for this exercise.
  db.exercises.update({_id: exercise._id}, { $set: { revisions: [] }});
});
// Hyper.js terminal configuration (~/.hyper.js) — Meslo/Powerline variant.
// NOTE(review): this gist preview is truncated here; the `config` object
// and the `module.exports` braces are never closed in this capture.
module.exports = {
config: {
// default font size in pixels for all tabs
fontSize: 12.5,
// font family with optional fallbacks
fontFamily: '"Meslo LG S for Powerline"',
// terminal cursor background color and opacity (hex, rgb, hsl, hsv, hwb or cmyk)
cursorColor: 'rgba(248,28,229,0.8)',
// Hyper.js terminal configuration (~/.hyper.js) — Inconsolata/Powerline variant.
// NOTE(review): this gist preview is truncated here; the `config` object
// and the `module.exports` braces are never closed in this capture.
module.exports = {
config: {
// default font size in pixels for all tabs
fontSize: 14,
// font family with optional fallbacks
fontFamily: '"Inconsolata for Powerline"',
// terminal cursor background color and opacity (hex, rgb, hsl, hsv, hwb or cmyk)
cursorColor: 'rgba(248,28,229,0.8)',
// Hyper.js terminal configuration (~/.hyper.js) — appears identical to the
// previous Inconsolata variant in the visible portion.
// NOTE(review): this gist preview is truncated here; the `config` object
// and the `module.exports` braces are never closed in this capture.
module.exports = {
config: {
// default font size in pixels for all tabs
fontSize: 14,
// font family with optional fallbacks
fontFamily: '"Inconsolata for Powerline"',
// terminal cursor background color and opacity (hex, rgb, hsl, hsv, hwb or cmyk)
cursorColor: 'rgba(248,28,229,0.8)',
from PIL import Image
# NOTE(review): multiprocessing is unused in the visible portion — presumably
# used past the truncation point (the comment on test_screen suggests it runs
# as a worker process); confirm against the full gist.
import multiprocessing
# Load one numbered drawing frame ('MA_HA1_drawing_<pic>.png') and expose its
# pixel access object.
# NOTE(review): the gist preview is truncated here AND the scrape stripped
# the function body's indentation; the original body continues past L48.
def test_screen(pic): # worker process (original comment: "Prozess")
file_prefix = 'MA_HA1_drawing_'
file_postfix = '.png'
file_name = file_prefix + str(pic) + file_postfix
im = Image.open(file_name)
pix = im.load()
# Aggregate, per user, how many items were ordered and how many came back.
#
# 1st idea (the image previously shared on WhatsApp):
# relationship between total orders (item count, not transaction count)
# and return count (again the item count).
# Are users who order more "wiser" in terms of picking the right items
# than small-volume users?
library(dplyr)  # loaded before first use of %>% / group_by / summarize

source("load_data.R")

# d: one row per ordered item, including a 0/1 `return` column.
d <- read_and_preprocess_data_file("data/BADS_WS1718_known.csv")

orders_and_returns_per_user <- d %>%
  group_by(user_id) %>%
  summarize(nreturn = sum(return), norder = n())
# Prepare the known and class data sets, then train the final model with
# .632 bootstrapping.
source('load_data.R')
d = read_and_preprocess_data_file('data/BADS_WS1718_known.csv')
d = subset(d, select = -c(delivery_date)) # drop delivery_date, the column containing NAs
classdata = read_and_preprocess_data_file('data/BADS_WS1718_class.csv')
classdata = subset(classdata, select = -c(delivery_date)) # drop delivery_date, the column containing NAs
# train the final model with 632 bootstrapping
# NOTE(review): gist preview truncated — the body and closing brace of this
# loop are not part of this capture.
for (iter in 1:400) {
# sample with replacement here - to understand why please refer to the book
# Random-classifier baseline: label each order as returned/kept at random
# and compare against the empirical return ratio of the known data.
source("load_data.R")

d <- read_and_preprocess_data_file("data/BADS_WS1718_known.csv")
n <- nrow(d)
ratio <- sum(d$return) / n  # empirical share of returned items

set.seed(1)  # make the random draw reproducible

# Draw n pseudo-uniform values on (0, 1] with 1/accuracy resolution.
accuracy <- 100000
randomized_returns <- sample(accuracy, n, replace = TRUE) / accuracy

# Binarize the draws against the observed return ratio.
# NOTE(review): with `> ratio` the predicted return rate is 1 - ratio, not
# ratio — if the intent was to match the empirical rate, this should be
# `< ratio`. Left unchanged; confirm the intended direction.
randomized_returns <- ifelse(randomized_returns > ratio, 1, 0)
#' Standardize a numeric vector to zero mean and unit standard deviation.
#'
#' @param x A numeric vector.
#' @return A numeric vector of z-scores, the same length as `x`.
#'   NaN when `sd(x)` is 0; NA propagates from any NA in `x`.
standardize <- function(x) {
  # sd() uses the sample (n - 1) denominator, matching the original.
  # Last expression is returned; explicit return() is reserved for early exits.
  (x - mean(x)) / sd(x)
}
# Presumably a module-level cache for per-user statistics, initialized empty
# (the `..` prefix suggests a "private" convention) — TODO confirm against
# the full load_data.R.
..static_user_statistics <- NULL
# NOTE(review): gist preview truncated — only this function's signature is
# visible; its body is not part of this capture, and the lines below belong
# to a separate usage-example snippet.
..read_and_preprocess_data_file = function(fp) {
# Usage example: sourcing load_data.R is all you need to do.
source('load_data.R')
# all you need to do
# now two variables are available in your environment:
# df_known - data frame with features from BADS_WS1718_known.csv
# df_class - data frame with features from BADS_WS1718_class.csv