MinimalChain.py: MinimalChain object in a minimal blockchain
import datetime

class MinimalChain():
    def __init__(self):  # initialize when creating a chain
        self.blocks = [self.get_genesis_block()]

    def get_genesis_block(self):
        return MinimalBlock(0,
                            datetime.datetime.utcnow(),
                            'Genesis',
                            'arbitrary')
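As shown, the chain only ever holds its genesis block. A hypothetical add_block method (not part of the snippet above) could be added inside MinimalChain to append new blocks, linking each block's previous_hash to the current tail:

    def add_block(self, data):
        # chain a new block onto the current last block
        self.blocks.append(MinimalBlock(len(self.blocks),
                                        datetime.datetime.utcnow(),
                                        data,
                                        self.blocks[-1].hash))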
MinimalBlock.py: MinimalBlock object in minimal blockchain
import hashlib

class MinimalBlock():
    def __init__(self, index, timestamp, data, previous_hash):
        self.index = index
        self.timestamp = timestamp
        self.data = data
        self.previous_hash = previous_hash
        self.hash = self.hashing()
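The constructor calls self.hashing(), which is not included in the snippet. A minimal sketch of the method (placed inside MinimalBlock), assuming a SHA-256 digest over the four block fields:

    def hashing(self):
        # hash the block contents together with the previous block's hash
        key = hashlib.sha256()
        key.update(str(self.index).encode('utf-8'))
        key.update(str(self.timestamp).encode('utf-8'))
        key.update(str(self.data).encode('utf-8'))
        key.update(str(self.previous_hash).encode('utf-8'))
        return key.hexdigest()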
The remaining snippets implement a minimal neural network in NumPy.

import numpy as np

# test loss
print(get_loss(y_test, forward_prop(X_test)))

def predict(X_raw_any):
    # standardize the raw inputs, run a forward pass, then undo the output scaling
    X_any = np.array([standardize(X_raw_any[row,:], X_scalers[row]) for row in range(X_num_row)])
    y_hat = forward_prop(X_any)
    y_hat_any = np.array([unstandardize(y_hat[row,:], y_scalers[row]) for row in range(y_num_row)])
    return y_hat_any
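A quick sanity check of predict, assuming the network has already been trained on the a+b, a-b, |a-b| task constructed further down (the input values here are illustrative):

# for a=30, b=70 a trained network should output roughly [100, -40, 40]
print(predict(np.array([[30], [70]])))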
learning_rate = 0.01
max_epoch = 1000000

for epoch in range(1, max_epoch+1):
    y_hat_train = forward_prop(X_train)  # update y_hat
    backward_prop(y_train, y_hat_train)  # update (dW, db)
    for layer_index in range(1, len(layers_dim)):  # update (W, b)
        neural_net[layer_index].W = neural_net[layer_index].W - learning_rate * neural_net[layer_index].dW
        neural_net[layer_index].b = neural_net[layer_index].b - learning_rate * neural_net[layer_index].db
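One optional addition inside a loop like this (not in the original snippet) is periodic progress logging:

    # report the standardized training loss every 100,000 epochs
    if epoch % 100000 == 0:
        print('epoch {}: loss = {}'.format(epoch, get_loss(y_train, y_hat_train)))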
def get_loss(y, y_hat, metric='mse'):
    if metric == 'mse':
        individual_loss = 0.5 * (y_hat - y) ** 2
        return np.mean([np.linalg.norm(individual_loss[:,col], 2) for col in range(individual_loss.shape[1])])
    else:
        raise Exception('Loss metric is not defined.')
def get_dZ_from_loss(y, y_hat, metric):
    if metric == 'mse':
        return y_hat - y  # for MSE with a linear output layer, dL/dZ = y_hat - y
    else:
        raise Exception('Loss metric is not defined.')
def activation(input_, act_func):
    if act_func == 'relu':
        return np.maximum(input_, np.zeros(input_.shape))
    elif act_func == 'linear':
        return input_
    else:
        raise Exception('Activation function is not defined.')
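backward_prop is called in the training loop above but does not appear in these snippets. A sketch consistent with the pieces shown, assuming forward_prop (next snippet) caches Z and A on each layer, and using a hypothetical helper get_dA_over_dZ for the elementwise activation derivative:

def get_dA_over_dZ(Z, act_func):
    # elementwise derivative of the activation function
    if act_func == 'relu':
        return (Z > 0).astype(float)
    elif act_func == 'linear':
        return np.ones(Z.shape)
    else:
        raise Exception('Activation function is not defined.')

def backward_prop(y, y_hat, metric='mse'):
    # walk from the output layer back to the first hidden layer,
    # caching dW and db on each layer for the gradient-descent step
    num_datum = y.shape[1]
    for layer_index in range(len(layers_dim) - 1, 0, -1):
        if layer_index == len(layers_dim) - 1:  # output layer
            dZ = get_dZ_from_loss(y, y_hat, metric)
        else:  # chain rule through the next layer's weights and the activation
            dZ = np.dot(neural_net[layer_index + 1].W.T, dZ) \
                 * get_dA_over_dZ(neural_net[layer_index].Z, neural_net[layer_index].activation)
        neural_net[layer_index].dW = np.dot(dZ, neural_net[layer_index - 1].A.T) / num_datum
        neural_net[layer_index].db = np.sum(dZ, axis=1, keepdims=True) / num_datum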
def forward_prop(input_vec, layers_dim=layers_dim, neural_net=neural_net):
    # standard forward pass: affine transform then activation, layer by layer
    neural_net[0].A = input_vec  # treat the raw input as layer 0's activation
    for layer_index in range(1, len(layers_dim)):
        neural_net[layer_index].Z = np.dot(neural_net[layer_index].W, neural_net[layer_index-1].A) + neural_net[layer_index].b
        neural_net[layer_index].A = activation(neural_net[layer_index].Z, neural_net[layer_index].activation)
    return neural_net[-1].A
class layer:
    def __init__(self, layer_index, is_output, input_dim, output_dim, activation):
        self.layer_index = layer_index  # zero indicates input layer
        self.is_output = is_output  # True indicates output layer, False otherwise
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.activation = activation

        # the multiplication constant is somewhat arbitrary
        if layer_index != 0:  # the input layer has no weights or biases
            self.W = np.random.randn(output_dim, input_dim) * np.sqrt(2 / input_dim)
            self.b = np.random.randn(output_dim, 1) * np.sqrt(2 / input_dim)
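layers_dim and the neural_net list are used throughout but never built in these snippets. A sketch of their construction, assuming two hidden ReLU layers and a linear output (the hidden sizes are illustrative):

layers_dim = [X_num_row, 4, 4, y_num_row]  # input layer, hidden layers, output layer
neural_net = []
for layer_index in range(len(layers_dim)):
    if layer_index == 0:  # input layer: no weights, no activation
        neural_net.append(layer(layer_index, False, 0, layers_dim[layer_index], 'irrelevant'))
    elif layer_index == len(layers_dim) - 1:  # output layer: linear
        neural_net.append(layer(layer_index, True, layers_dim[layer_index-1], layers_dim[layer_index], 'linear'))
    else:  # hidden layers: relu
        neural_net.append(layer(layer_index, False, layers_dim[layer_index-1], layers_dim[layer_index], 'relu'))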
class scaler:
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

def get_scaler(row):
    # fit a per-row (i.e. per-feature) scaler from the row's mean and std
    mean = np.mean(row)
    std = np.std(row)
    return scaler(mean, std)
train_ratio = 0.7
num_train_datum = int(train_ratio * X_num_col)

# split the raw data column-wise into training and test sets
X_raw_train = X_raw[:, 0:num_train_datum]
X_raw_test = X_raw[:, num_train_datum:]
y_raw_train = y_raw[:, 0:num_train_datum]
y_raw_test = y_raw[:, num_train_datum:]
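The other snippets use standardized data (X_train, y_train, X_test, y_test) and per-row scalers (X_scalers, y_scalers), none of which are constructed here. A sketch using the scaler class above; standardize and unstandardize are assumed helpers:

def standardize(data, scaler):
    return (data - scaler.mean) / scaler.std

def unstandardize(data, scaler):
    return data * scaler.std + scaler.mean

# one scaler per feature row, fitted on the training portion only
X_scalers = [get_scaler(X_raw_train[row, :]) for row in range(X_num_row)]
y_scalers = [get_scaler(y_raw_train[row, :]) for row in range(y_num_row)]

X_train = np.array([standardize(X_raw_train[row, :], X_scalers[row]) for row in range(X_num_row)])
y_train = np.array([standardize(y_raw_train[row, :], y_scalers[row]) for row in range(y_num_row)])
X_test = np.array([standardize(X_raw_test[row, :], X_scalers[row]) for row in range(X_num_row)])
y_test = np.array([standardize(y_raw_test[row, :], y_scalers[row]) for row in range(y_num_row)])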
X_num_row, X_num_col = [2, 10000]  # rows are features, columns are data points
X_raw = np.random.rand(X_num_row, X_num_col) * 100

# for inputs a and b, the three target rows are a+b, a-b, and |a-b|
y_raw = np.concatenate(([(X_raw[0,:] + X_raw[1,:])], [(X_raw[0,:] - X_raw[1,:])], np.abs([(X_raw[0,:] - X_raw[1,:])])))
y_num_row, y_num_col = y_raw.shape
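A quick shape check (the expected values follow directly from the construction above):

print(X_raw.shape)  # (2, 10000): two input features per data point
print(y_raw.shape)  # (3, 10000): a+b, a-b, and |a-b| targets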