@shubham0204
Created May 27, 2019 12:18
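This gist is a fragment of a longer training script: h, mean_squared_error, mean_squared_error_deriv, the dataset iterator, and the hyperparameters are all defined elsewhere in the accompanying tutorial. Below is a minimal sketch of plausible definitions, assuming a linear hypothesis X @ W + b and the standard MSE loss; the exact bodies and hyperparameter values in the original may differ.

import numpy as np
import tensorflow as tf

num_epochs = 10         # assumed hyperparameters, not taken from the gist
batch_size = 10
learning_rate = 0.001

num_samples = X.shape[0]  # X, Y are the training features and targets

def h( X , weights , bias ):
    # Linear hypothesis: H = X @ W + b
    return tf.tensordot( X , weights , axes=1 ) + bias

def mean_squared_error( Y , y_pred ):
    # J = mean( ( y_pred - Y )^2 )
    return tf.reduce_mean( tf.square( y_pred - Y ) )

def mean_squared_error_deriv( Y , y_pred ):
    # Element-wise derivative of the squared error: dJ/dH = 2 * ( y_pred - Y )
    return 2 * ( y_pred - Y )

# repeat() so one iterator can serve all num_epochs passes over the data
dataset = tf.data.Dataset.from_tensor_slices( ( X , Y ) )
dataset = dataset.shuffle( buffer_size=1024 ).repeat( num_epochs ).batch( batch_size )
iterator = iter( dataset )  # iterator.get_next() yields one batch in eager mode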
num_features = X.shape[1]
weights = tf.random.normal( ( num_features , 1 ) )  # initialise W randomly
bias = 0

epochs_plot = list()
loss_plot = list()

for i in range( num_epochs ) :
    epoch_loss = list()
    for b in range( int( num_samples / batch_size ) ):
        x_batch , y_batch = iterator.get_next()
        # Forward pass and per-batch loss
        output = h( x_batch , weights , bias )
        epoch_loss.append( mean_squared_error( y_batch , output ).numpy() )
        # Backpropagation by hand, via the chain rule: dJ/dW = dJ/dH * dH/dW
        dJ_dH = mean_squared_error_deriv( y_batch , output )
        dH_dW = x_batch
        dJ_dW = tf.reduce_mean( dJ_dH * dH_dW )
        dJ_dB = tf.reduce_mean( dJ_dH )
        # Gradient-descent update of the parameters
        weights -= ( learning_rate * dJ_dW )
        bias -= ( learning_rate * dJ_dB )
    loss = np.array( epoch_loss ).mean()
    epochs_plot.append( i + 1 )
    loss_plot.append( loss )
    print( 'Loss is {}'.format( loss ) )
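The loop collects epochs_plot and loss_plot but never draws them; a short sketch of how the loss curve could be visualised afterwards (matplotlib is an assumption here, not part of the gist):

import matplotlib.pyplot as plt

plt.plot( epochs_plot , loss_plot )
plt.xlabel( 'Epoch' )
plt.ylabel( 'Mean squared error' )
plt.title( 'Training loss per epoch' )
plt.show()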