import numpy as np

def generate_raw_weights(n, a, b, p):
    # Gaussian-like n x n weight matrix centred at (a, b), decaying with rate p
    res = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            sq_dist = ((i - a)**2 + (j - b)**2) * 1.0 / n**2
            res[i, j] = np.exp(-sq_dist * p)
    return res
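A quick usage sketch for the function above; the size, centre and decay rate here are illustrative values, not taken from the gist:

# 64x64 weight map peaked at (10, 20); larger p gives a sharper peak
W = generate_raw_weights(64, 10, 20, 50)
print(W.shape, W.max())  # (64, 64) 1.0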
@geffy
geffy / RDM_keras_model.py
Last active July 15, 2016 04:53
Main parts of our custom network for Data Science Game 2016 (online part)
from keras.models import Sequential
from keras.layers import Convolution2D, Activation, MaxPooling2D

def hard_normalizing(X):
    # rescale pixel values from [0, 1] to [-1, 1]
    return (X - 0.5) / 0.5

def init_model():
    model = Sequential()
    model.add(Convolution2D(64, 3, 3, border_mode='valid', input_shape=(3, 64, 64)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    return model
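The preview stops after the first convolution block; a hedged sketch of how a model like this could be finished and compiled in Keras 1.x is below. The classifier head, number of classes and optimizer are assumptions for illustration, not taken from the gist.

from keras.layers import Flatten, Dense
from keras.optimizers import Adam

def build_full_model(n_classes):
    # first block as in the gist preview, followed by an assumed classifier head
    model = init_model()
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model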
import gym
import numpy as np

env = gym.make('FrozenLake8x8-v0')
env.reset()

# find terminal states in a raw transition model of the form
# {state: {action: [(prob, next_state, reward, done), ...]}}
def find_terminals(mdp_raw):
    terminals = set()
    for src_state, node in mdp_raw.items():
        for action, transitions in node.items():
            for prob, next_state, reward, done in transitions:
                if done:
                    terminals.add(next_state)
    return terminals
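FrozenLake exposes its transition model as env.P, which already has the {state: {action: [(prob, next_state, reward, done), ...]}} layout, so one way to call the function (how the gist actually builds mdp_raw is not shown in this preview) is:

# pass the built-in transition model directly
terminals = find_terminals(env.P)
print(sorted(terminals))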
# Solving as MDP using Value Iteration Algorithm
import gym
import numpy as np

def iterate_value_function(v_inp, gamma, env):
    # one Bellman backup over all states; returns the updated value function
    ret = np.zeros(env.nS)
    for sid in range(env.nS):
        temp_v = np.zeros(env.nA)
        for action in range(env.nA):
            for prob, dst_state, reward, done in env.P[sid][action]:
                temp_v[action] += prob * (reward + gamma * v_inp[dst_state])
        ret[sid] = max(temp_v)
    return ret
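The preview ends inside the inner loop; a driver sketch for the full algorithm is below. It reuses iterate_value_function and the FrozenLake environment from the snippets above; the discount factor and iteration cap are illustrative choices, not values from the gist.

env = gym.make('FrozenLake8x8-v0')
gamma = 0.99

# run Bellman backups until the value function stops changing
v = np.zeros(env.nS)
for _ in range(1000):
    v_new = iterate_value_function(v, gamma, env)
    if np.allclose(v_new, v, atol=1e-8):
        break
    v = v_new

# greedy policy with respect to the converged value function
policy = np.zeros(env.nS, dtype=int)
for sid in range(env.nS):
    q = np.zeros(env.nA)
    for action in range(env.nA):
        for prob, dst_state, reward, done in env.P[sid][action]:
            q[action] += prob * (reward + gamma * v[dst_state])
    policy[sid] = np.argmax(q)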
import gym
import numpy as np

env = gym.make('FrozenLake8x8-v0')
env.reset()

# policy obtained from solving MDP
policy = np.array(
    [3, 2, 2, 2, 2, 2, 2, 2,
     3, 3, 3, 3, 3, 3, 3, 2,
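The array in the preview is cut off after the first two rows of the 8x8 grid; assuming the full 64-entry policy, a rollout sketch looks like this (old-style gym step API, matching the snippets above):

# follow the precomputed policy until the episode ends
obs = env.reset()
total_reward, done = 0.0, False
while not done:
    obs, reward, done, info = env.step(policy[obs])
    total_reward += reward
print('episode reward:', total_reward)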
import numpy as np
import tensorflow as tf
import os
import glob

def tf2npz(tf_path, export_folder='/ssd/yt8m/data_npz/'):
    # convert one YouTube-8M video-level TFRecord file into a .npz archive
    vid_ids = []
    labels = []
    mean_rgb = []
    mean_audio = []
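    # Possible continuation (not shown in the gist preview): parse each
    # tf.train.Example with the TF 1.x record iterator and save the arrays
    # as <export_folder>/<tfrecord basename>.npz. The feature names assume
    # the 2017 YouTube-8M video-level format.
    for record in tf.python_io.tf_record_iterator(tf_path):
        example = tf.train.Example.FromString(record)
        feats = example.features.feature
        vid_ids.append(feats['video_id'].bytes_list.value[0].decode())
        labels.append(np.array(feats['labels'].int64_list.value))
        mean_rgb.append(np.array(feats['mean_rgb'].float_list.value, dtype=np.float32))
        mean_audio.append(np.array(feats['mean_audio'].float_list.value, dtype=np.float32))

    out_path = os.path.join(export_folder,
                            os.path.splitext(os.path.basename(tf_path))[0] + '.npz')
    np.savez_compressed(out_path,
                        vid_ids=np.array(vid_ids),
                        labels=np.array(labels, dtype=object),
                        mean_rgb=np.array(mean_rgb),
                        mean_audio=np.array(mean_audio))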
@geffy
geffy / bagging.py
Created October 7, 2017 17:21
Example of bagging
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 23:16:44 2017
@author: Marios Michailidis
This is an example of a simple method that performs bagging
"""
@geffy
geffy / stacking_example.py
Created October 7, 2017 17:33
Stacking example
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 23:16:44 2017
@author: Marios Michailidis
This is an example that performs stacking to improve mean squared error.
This example uses 2 base learners (a linear regression and a random forest)
and linear regression (again) as a meta learner to achieve the best score.
The initial training data are split into 2 halves to commence the stacking.
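Following the description in the docstring, a minimal sketch of that scheme (split the training data in two halves, fit the base learners on the first half, train the meta learner on their predictions for the second half) could look like this; the function name and model parameters are illustrative, not taken from the gist.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor

def stacking_predict(X_train, y_train, X_test, seed=1):
    # split the training data into two halves
    half = len(X_train) // 2
    X1, y1 = X_train[:half], y_train[:half]
    X2, y2 = X_train[half:], y_train[half:]

    # fit the two base learners on the first half
    base_models = [LinearRegression(),
                   RandomForestRegressor(n_estimators=100, random_state=seed)]
    for model in base_models:
        model.fit(X1, y1)

    # their predictions on the second half become the meta-features
    meta_train = np.column_stack([m.predict(X2) for m in base_models])
    meta_test = np.column_stack([m.predict(X_test) for m in base_models])

    # linear regression (again) as the meta learner
    meta_model = LinearRegression()
    meta_model.fit(meta_train, y2)
    return meta_model.predict(meta_test)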