James Le (khanhnamle1994)
khanhnamle1994 / vgg16.py
Last active May 8, 2018 13:47
FCN - Load Pretrained VGG Model into TensorFlow.
import tensorflow as tf

def load_vgg(sess, vgg_path):
    # Load the saved model and weights into the session
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
    # Get the tensors to be returned from the graph
    graph = tf.get_default_graph()
    image_input = graph.get_tensor_by_name('image_input:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    layer3 = graph.get_tensor_by_name('layer3_out:0')
    layer4 = graph.get_tensor_by_name('layer4_out:0')
    layer7 = graph.get_tensor_by_name('layer7_out:0')
    return image_input, keep_prob, layer3, layer4, layer7
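A minimal usage sketch (the session setup and the './vgg' path are assumptions, not part of the gist):

with tf.Session() as sess:
    # './vgg' is a placeholder path to the downloaded VGG16 SavedModel
    image_input, keep_prob, layer3, layer4, layer7 = load_vgg(sess, './vgg')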
def get_summary_n(squad_list, squad_name, nationality_list):
    summary = []
    for i in nationality_list:
        count = 0
        for j in squad_list:
            # for overall rating
            O_temp_rating, _ = get_best_squad_n(formation=j, nationality=i, measurement='Overall')
khanhnamle1994 / RNN_layer.py
Created May 21, 2018 03:29
Implementation of RNN Layers
# MultiplyGate and AddGate are defined in companion gists (not shown here);
# Tanh is defined in tanh.py below
mulGate = MultiplyGate()
addGate = AddGate()
activation = Tanh()

class RNNLayer:
    def forward(self, x, prev_s, U, W, V):
        # U*x: input projection; W*prev_s: recurrent projection
        self.mulu = mulGate.forward(U, x)
        self.mulw = mulGate.forward(W, prev_s)
        # new hidden state s_t = tanh(W*s_{t-1} + U*x_t)
        self.add = addGate.forward(self.mulw, self.mulu)
        self.s = activation.forward(self.add)
        # output scores mulv = V*s_t (consumed by the softmax below)
        self.mulv = mulGate.forward(V, self.s)
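A single-step usage sketch (the toy sizes and word id are assumptions; model is an instance of the Model class initialized in model_init.py below):

import numpy as np

model = Model(word_dim=8000, hidden_dim=100)  # defined in model_init.py below
x_t = np.zeros(8000)
x_t[42] = 1                                   # one-hot encoding of word id 42
layer = RNNLayer()
layer.forward(x_t, np.zeros(100), model.U, model.W, model.V)
print(layer.s.shape)                          # (100,)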
khanhnamle1994 / model_init.py
Created May 21, 2018 03:34
Initializing RNN Model with random weights
import numpy as np

class Model:
    def __init__(self, word_dim, hidden_dim=100, bptt_truncate=4):
        self.word_dim = word_dim
        self.hidden_dim = hidden_dim
        self.bptt_truncate = bptt_truncate
        # Initialize each weight matrix uniformly in [-1/sqrt(n), 1/sqrt(n)],
        # where n is the incoming dimension
        self.U = np.random.uniform(-np.sqrt(1. / word_dim), np.sqrt(1. / word_dim), (hidden_dim, word_dim))
        self.W = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. / hidden_dim), (hidden_dim, hidden_dim))
        self.V = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. / hidden_dim), (word_dim, hidden_dim))
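A minimal instantiation sketch (the 8000-word vocabulary is an assumption):

model = Model(word_dim=8000)
print(model.U.shape, model.W.shape, model.V.shape)  # (100, 8000) (100, 100) (8000, 100)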
khanhnamle1994 / forward_prop.py
Created May 21, 2018 03:54
The forward propagation to predict word probabilities
def forward_propagation(self, x):
    # The total number of time steps
    T = len(x)
    layers = []
    prev_s = np.zeros(self.hidden_dim)
    # For each time step...
    for t in range(T):
        layer = RNNLayer()
        # One-hot encode the input word at step t
        input = np.zeros(self.word_dim)
        input[x[t]] = 1
        layer.forward(input, prev_s, self.U, self.W, self.V)
        prev_s = layer.s
        layers.append(layer)
    return layers
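Usage sketch (the word indices are made up):

x = [0, 179, 341, 416]                 # one sentence as word indices
layers = model.forward_propagation(x)
print(len(layers))                     # one RNNLayer per time step -> 4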
khanhnamle1994 / softmax.py
Created May 21, 2018 03:56
Softmax Output
import numpy as np

class Softmax:
    def predict(self, x):
        # Subtract the max before exponentiating for numerical stability
        exp_scores = np.exp(x - np.max(x))
        return exp_scores / np.sum(exp_scores)
    def loss(self, x, y):
        # Cross-entropy loss of the true class y
        probs = self.predict(x)
        return -np.log(probs[y])
    def diff(self, x, y):
        # Gradient of the loss w.r.t. the scores: probs - one_hot(y)
        probs = self.predict(x)
        probs[y] -= 1.0
        return probs
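A quick numeric check (the scores are illustrative):

sm = Softmax()
print(sm.predict(np.array([1.0, 2.0, 3.0])))  # ~[0.090, 0.245, 0.665]
print(sm.loss(np.array([1.0, 2.0, 3.0]), 2))  # ~0.408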
khanhnamle1994 / loss.py
Created May 21, 2018 03:57
Using cross-entropy to calculate the loss function
def calculate_loss(self, x, y):
    assert len(x) == len(y)
    output = Softmax()
    layers = self.forward_propagation(x)
    loss = 0.0
    # Accumulate the cross-entropy loss at every time step
    for i, layer in enumerate(layers):
        loss += output.loss(layer.mulv, y[i])
    return loss / float(len(y))

def calculate_total_loss(self, X, Y):
    # Average the per-sentence loss over the whole dataset
    loss = 0.0
    for i in range(len(Y)):
        loss += self.calculate_loss(X[i], Y[i])
    return loss / float(len(Y))
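Usage sketch (X_train and Y_train are assumed lists of word-index sequences):

print(model.calculate_total_loss(X_train, Y_train))
# For a vocabulary of size C, an untrained model should sit near ln(C)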
khanhnamle1994 / backpropagation.py
Created May 21, 2018 03:58
Backpropagation Through Time
def bptt(self, x, y):
    assert len(x) == len(y)
    output = Softmax()
    layers = self.forward_propagation(x)
    dU = np.zeros(self.U.shape)
    dV = np.zeros(self.V.shape)
    dW = np.zeros(self.W.shape)
    T = len(layers)
    prev_s_t = np.zeros(self.hidden_dim)
    diff_s = np.zeros(self.hidden_dim)
    for t in range(T):
        dmulv = output.diff(layers[t].mulv, y[t])
        input = np.zeros(self.word_dim)
        input[x[t]] = 1
        # layers[t].backward comes from the companion RNNLayer gist (not shown
        # here); the extra walk back through bptt_truncate earlier steps is
        # elided in this sketch
        dprev_s, dU_t, dW_t, dV_t = layers[t].backward(input, prev_s_t, self.U, self.W, self.V, diff_s, dmulv)
        prev_s_t = layers[t].s
        dU += dU_t; dW += dW_t; dV += dV_t
    return (dU, dW, dV)
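Sanity-check sketch (x and y are assumed index sequences of equal length):

dU, dW, dV = model.bptt(x, y)
print(dU.shape == model.U.shape,
      dW.shape == model.W.shape,
      dV.shape == model.V.shape)  # True True True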
khanhnamle1994 / sgd.py
Created May 21, 2018 04:01
Stochastic Gradient Descent
def sgd(self, x, y, learning_rate):
    # One SGD step on a single training example
    dU, dW, dV = self.bptt(x, y)
    self.U -= learning_rate * dU
    self.V -= learning_rate * dV
    self.W -= learning_rate * dW
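A hedged training-loop sketch (the hyperparameters and X_train/Y_train are illustrative, not from the gists):

learning_rate = 0.005
for epoch in range(10):
    for i in range(len(Y_train)):
        model.sgd(X_train[i], Y_train[i], learning_rate)
    print(epoch, model.calculate_total_loss(X_train, Y_train))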
khanhnamle1994 / tanh.py
Last active May 22, 2018 02:36
Tanh Activation Function
import numpy as np

class Tanh:
    def forward(self, x):
        return np.tanh(x)
    def backward(self, x, top_diff):
        # d/dx tanh(x) = 1 - tanh(x)^2, chained with the upstream gradient
        output = self.forward(x)
        return (1.0 - np.square(output)) * top_diff
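Quick numeric check:

t = Tanh()
print(t.forward(0.0))        # 0.0
print(t.backward(0.0, 1.0))  # 1.0, since tanh'(0) = 1 - tanh(0)^2 = 1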