GitHub gists by Sunny Guha (sol0invictus)
sol0invictus / second.py
Created January 5, 2020 04:04
loss_blog_2
import tensorflow as tf

class model:
    def __init__(self):
        # Xavier (Glorot uniform) initialization for all layers
        xavier = tf.keras.initializers.GlorotUniform()
        self.l1 = tf.keras.layers.Dense(64, kernel_initializer=xavier,
                                        activation=tf.nn.relu, input_shape=[1])
        self.l2 = tf.keras.layers.Dense(64, kernel_initializer=xavier,
                                        activation=tf.nn.relu)
        self.out = tf.keras.layers.Dense(1, kernel_initializer=xavier)
        # Adagrad optimizer used for training
        self.train_op = tf.keras.optimizers.Adagrad(learning_rate=0.1)

# Running the model
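The class only wires the layers and optimizer together; a minimal sketch of one way to run it and take a single training step (the run_model helper and the sample data are assumptions, not part of the gist):

def run_model(m, x):
    # chain the three Dense layers to produce a prediction
    return m.out(m.l2(m.l1(x)))

net = model()
x = tf.constant([[1.0], [2.0]])
y = tf.constant([[2.0], [4.0]])
with tf.GradientTape() as tape:
    # mean squared error between targets and predictions
    loss = tf.reduce_mean(tf.square(y - run_model(net, x)))
variables = (net.l1.trainable_variables + net.l2.trainable_variables
             + net.out.trainable_variables)
grads = tape.gradient(loss, variables)
net.train_op.apply_gradients(zip(grads, variables))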
sol0invictus / first.py
Created January 5, 2020 04:10
custom loss blog file
import tensorflow.keras.backend as kb

def custom_loss(y_actual, y_pred):
    custom_loss = kb.square(y_actual - y_pred)
    return custom_loss
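In the blog's setting this loss would be handed to Keras at compile time; a minimal sketch, assuming a small regression model and that training data exists:

net = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=[1]),
    tf.keras.layers.Dense(1),
])
net.compile(optimizer='adam', loss=custom_loss)  # Keras reduces the per-element values
# net.fit(x_train, y_train, epochs=10)           # assuming x_train/y_train exist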
# TensorFlow 1.x graph style: define the graph, then evaluate it in a session
y_hat = tf.constant(36, name='y_hat')            # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y')                    # Define y. Set to 39.
loss = tf.Variable((y - y_hat)**2, name='loss')  # Create a variable for the loss

init = tf.global_variables_initializer()  # When init is run later (session.run(init)),
                                          # the loss variable will be initialized
with tf.Session() as session:             # Create a session and print the output
    session.run(init)                     # Initializes the variables
    print(session.run(loss))              # Prints the loss: 9
# TensorFlow 2.x eager style: the same computation evaluates immediately
y_hat = tf.constant(36)  # Define y_hat constant. Set to 36.
y = tf.constant(39)      # Define y. Set to 39.
loss = tf.Variable((y - y_hat)**2, name='loss')
print(loss.numpy())      # 9; no session or initializer needed
# TF1 graph style: nothing runs until the graph is executed in a session
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a, b)
sess = tf.Session()
print(sess.run(c))  # 20

# Printing the tensor directly: in TF2 eager mode this shows the value,
# tf.Tensor(20, shape=(), dtype=int32); in TF1 graph mode it would show
# only the symbolic tensor, not 20.
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a, b)
print(c)
def sigmoid(z):
    # TF1 style: create a placeholder for x. Name it 'x'.
    x = tf.placeholder(tf.float32, name="x")
    # compute sigmoid(x)
    sigmoid = tf.sigmoid(x)
    # Create a session and run it, using a feed_dict to pass z's value to x.
    with tf.Session() as sess:
        # Run the session and call the output "result"
        result = sess.run(sigmoid, feed_dict={x: z})
    return result
import numpy as np

def sigmoid(z):
    # TF2 eager style: compute sigmoid(x) directly, no placeholders or sessions
    x = np.asarray(z, dtype=np.float32)
    sigmoid = tf.math.sigmoid(x)
    result = sigmoid.numpy()
    return result
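A quick sanity check of the eager version (the input values are illustrative):

print(sigmoid(0.0))          # 0.5
print(sigmoid([0.0, 12.0]))  # [0.5       0.9999939]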
def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,
          num_epochs=1500, minibatch_size=32, print_cost=True):
    tf.set_random_seed(1)      # to keep consistent results
    seed = 3                   # to keep consistent results
    (n_x, m) = X_train.shape   # n_x: input size, m: number of examples in the train set
    n_y = Y_train.shape[0]     # n_y: output size
    costs = []                 # to keep track of the cost
    # Create Placeholders of shape (n_x, n_y)
def get_grad(inputs, targets, parameters):
    with tf.GradientTape() as tape:
        # calculate the loss
        Z3 = forward_propagation(inputs, parameters)
        loss_value = compute_cost(Z3, targets)
    # return the gradients of the loss w.r.t. the parameters, plus the loss
    return [tape.gradient(loss_value, list(parameters.values())), loss_value]
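A sketch of how get_grad might drive the training loop (the optimizer choice and hyperparameter values are assumptions; forward_propagation, compute_cost, and the parameters dict come from the surrounding blog code):

optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
for epoch in range(1500):
    # compute gradients and current loss on the full training set
    grads, loss_value = get_grad(X_train, Y_train, parameters)
    optimizer.apply_gradients(zip(grads, list(parameters.values())))
    if epoch % 100 == 0:
        print("epoch", epoch, "cost", float(loss_value))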