import os

import numpy as np
import skimage.io as io
import skimage.transform as trans
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
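The imported callbacks plug into model.fit; a minimal sketch of how they are typically wired up (the checkpoint path, monitored metric, and decay schedule below are illustrative assumptions, not part of the gist):

# Hypothetical wiring of the callbacks imported above; 'unet.hdf5'
# and the halving schedule are assumptions for illustration.
checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss',
                             verbose=1, save_best_only=True)

def halve_every_10(epoch, lr):
    # Assumed schedule: halve the learning rate every 10 epochs.
    return lr * 0.5 if epoch > 0 and epoch % 10 == 0 else lr

scheduler = LearningRateScheduler(halve_every_10)
# model.fit(x_train, y_train, epochs=50, callbacks=[checkpoint, scheduler])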
items = ['Andrew Ng', 'François Chollet', 'Josh Stamer',
         'Kent Beck', 'Erick Evans', 'Soumith Chintala']

def find_all_pairs(items):
    # Nested loops over the same list: O(n^2) comparisons.
    for item_a in items:
        for item_b in items:
            print(item_a + ' - ' + item_b)

find_all_pairs(items)
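If each unordered pair should appear only once (no self-pairs, no mirrored duplicates), itertools.combinations does the bookkeeping; a small variant sketch (find_unique_pairs is a name introduced here):

from itertools import combinations

def find_unique_pairs(items):
    # Each unordered pair is yielded exactly once: n*(n-1)/2 pairs.
    for item_a, item_b in combinations(items, 2):
        print(item_a + ' - ' + item_b)

find_unique_pairs(items)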
from itertools import chain, combinations

authors = ['Andrew Ng', 'François Chollet', 'Josh Stamer',
           'Kent Beck', 'Erick Evans', 'Soumith Chintala']

def subsets(iterable):
    # Power set: every combination of every size, 2^n subsets in total.
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

print(list(subsets(authors)))
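The power set grows exponentially: six authors already yield 2**6 = 64 subsets, which a quick check confirms:

print(len(list(subsets(authors))))  # 64 == 2 ** 6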
items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def binary_search(alist, item):
    first = 0
    last = len(alist) - 1
    found = False
    while first <= last and not found:
        midpoint = (first + last) // 2
        if alist[midpoint] == item:
            found = True
        elif alist[midpoint] < item:
            first = midpoint + 1
        else:
            last = midpoint - 1
    return found
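A quick check, plus the standard library's bisect for comparison (the bisect usage is a sketch added here, not part of the original gist):

print(binary_search(items, 7))   # True
print(binary_search(items, 42))  # False

from bisect import bisect_left
idx = bisect_left(items, 7)
print(idx < len(items) and items[idx] == 7)  # True, also O(log n)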
import time

lot_of_items = ['coffee'] * 100000

def find_coffee(items):
    # Linear scan: every element is inspected, O(n) time.
    start = time.time()
    found = False
    for item in items:
        if item == 'coffee':
            found = True
    if found:
        print('Found Coffee')
    print('Took %.6f seconds' % (time.time() - start))
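Running the same scan over a small and a large list makes the linear growth visible (both sizes appear in the gist):

items = ['coffee'] * 10
find_coffee(items)         # scans 10 elements
find_coffee(lot_of_items)  # scans 100000 elements, proportionally slower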
authors = ['Andrew Ng', 'Josh Stamer', 'Kent Beck',
           'Martin Fowler', 'Erick Evans', 'Erich Gamma']

def first_two_authors(authors):
    print(authors[0])  # O(1)
    print(authors[1])  # O(1)

first_two_authors(authors)  # O(2), i.e. still constant: O(1)
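Indexing does not depend on list length, which is easy to confirm (the inflated list below is illustrative):

first_two_authors(authors)           # 6-element list
first_two_authors(authors * 100000)  # 600000-element list, same cost per access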
def double_and_copy_list_values(items):
    # Builds a new list: O(n) extra space.
    new_list = []
    for item in items:
        new_list.append(item * 2)
    return new_list

def double_list_values(my_list, idx):
    # Mutates one element in place: O(1) extra space.
    my_list[idx] = my_list[idx] * 2
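A quick comparison of the two approaches (sample values are illustrative):

values = [1, 2, 3]
print(double_and_copy_list_values(values))  # [2, 4, 6]; values is unchanged

for idx in range(len(values)):
    double_list_values(values, idx)
print(values)  # [2, 4, 6]; doubled in place, O(1) extra space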
toniesteves / knn_impute.py
Created May 17, 2020, forked from YohanObadia/knn_impute.py
Imputation of missing values with KNN.
import numpy as np
import pandas as pd
from collections import defaultdict
from scipy.stats import hmean
from scipy.spatial.distance import cdist
from scipy import stats
import numbers
def weighted_hamming(data):
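The gist preview truncates here. For the same end goal, scikit-learn ships a ready-made KNN imputer; a minimal sketch using sklearn.impute.KNNImputer (sklearn's implementation, not the forked gist's custom distance logic; the sample frame is made up):

import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer

df = pd.DataFrame({'a': [1.0, 2.0, np.nan, 4.0],
                   'b': [2.0, np.nan, 6.0, 8.0]})

# Each missing value is replaced using the k nearest rows,
# with distance computed over the mutually observed features.
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(df))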