reset_graph()
n_epochs = 1000
learning_rate = 0.01
X = tf.constant(inputs, dtype=tf.float32, name="x")
y = tf.constant(output, dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n, 1], -1.0, 1.0), name="theta")
logits = tf.matmul(X, theta, name="logits")
#predictions = 1/(1 + tf.exp(-logits))  #manual sigmoid, equivalent to the built-in op below
predictions = tf.sigmoid(logits)
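
# A hedged completion of the logistic regression graph above (a sketch, not necessarily the author's
# original training code): log loss plus TensorFlow's GradientDescentOptimizer. It assumes inputs is
# an (m, n) NumPy array of features, output is an (m, 1) array of 0/1 labels, and n was set to the
# number of features before this snippet.
loss = tf.losses.log_loss(y, predictions)  #binary cross entropy on the sigmoid outputs
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        _, loss_val = sess.run([training_op, loss])
        if epoch % 100 == 0:
            print("Epoch", epoch, "log loss =", loss_val)
    best_theta = theta.eval()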
import numpy as np
import tensorflow as tf

def reset_graph(seed=42):
    #Clear the default graph and fix the random seeds so runs are reproducible
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

reset_graph()
#Using TensorFlow's GradientDescentOptimizer
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
import math
housing = fetch_california_housing()
reset_graph()
#The preceding code works fine, but it requires mathematically deriving the gradients from the cost function (MSE).
#In the case of linear regression this is reasonably easy, but doing it by hand for deep neural networks
#would be tedious and error-prone, and you would get quite a headache. Thankfully, TF's autodiff feature comes to the rescue:
#it can automatically and efficiently compute the gradients for you.
#gradients = tf.gradients(mse, [theta])[0]
#The gradients() function takes an op (in this case mse) and a list of variables (in this case just theta),
#and it creates a list of ops (one per variable) to compute the gradients of the op with regard to each variable.
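
# A minimal sketch of the autodiff version on the California housing data (a sketch under stated
# assumptions, not the author's exact code): standardize the features, prepend a bias column, let
# tf.gradients() differentiate the MSE with respect to theta, and apply the update with tf.assign().
# The names scaled_housing_data_plus_bias, m and n are introduced here; the commented-out lines show
# the equivalent tf.train.GradientDescentOptimizer route mentioned above.
n_epochs = 1000
learning_rate = 0.01

scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
m, n = scaled_housing_data.shape
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_predicted = tf.matmul(X, theta, name="predictions")
error = y_predicted - y
mse = tf.reduce_mean(tf.square(error), name="mse")

gradients = tf.gradients(mse, [theta])[0]                     #autodiff: d(mse)/d(theta)
training_op = tf.assign(theta, theta - learning_rate * gradients)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
#training_op = optimizer.minimize(mse)                        #same update, no explicit gradient handling

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()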
reset_graph()
#When using Gradient Descent, remember that it is important to first normalize the input feature vector,
#or else training may be much slower.
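
# A quick illustration of that point, assuming housing from fetch_california_housing above: the raw
# feature columns have wildly different scales, so a single learning rate suits some dimensions far
# better than others; after StandardScaler every column has zero mean and unit variance.
from sklearn.preprocessing import StandardScaler
print(housing.data.std(axis=0))                                   #per-feature standard deviations of the raw data
print(StandardScaler().fit_transform(housing.data).std(axis=0))   #all (approximately) 1.0 after standardization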
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
import math
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing.data, housing.target.reshape(-1, 1))
print(np.r_[lin_reg.intercept_.reshape(-1, 1), lin_reg.coef_.T])
#[[-3.69419202e+01]
# [ 4.36693293e-01]
# [ 9.43577803e-03]
# [-1.07322041e-01]
# ...  (remaining coefficients truncated in this preview)
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
import math
housing = fetch_california_housing()
print(type(housing['data']))
print(type(housing['target']))
#DIFFERENCE BETWEEN tf.nn.softmax_cross_entropy_with_logits AND tf.nn.sparse_softmax_cross_entropy_with_logits
#WARNING: Both ops expect unscaled logits, since they perform a softmax on the logits internally for efficiency.
# Do not call these ops with the output of softmax, as that will produce incorrect results.
#For tf.nn.sparse_softmax_cross_entropy_with_logits:
# "A common use case is to have logits of shape [batch_size, num_classes] and labels of shape [batch_size]. But higher dimensions are supported."
# NOTE: For this operation, the probability of a given label is considered exclusive.
#That is, soft classes are not allowed, and the labels vector must provide a single specific index for the true class for each row of logits (each minibatch entry).
#A short sketch using both ops on the example data follows below.
## MULTICLASS CROSS ENTROPY
import tensorflow as tf
import numpy as np
from sklearn.metrics import log_loss
#4 observations and 4 classes
y_true = [4,4,3,1] #Hard classes
y_true_onehot = [[0,0,0,1], [0,0,0,1], [0,0,1,0], [1,0,0,0]] #one_hot encoding
y_pred = [[0.2, 0.2, 0.2, 0.2], [0.6, 0.7, 0.1, 0.2], [0.3, 0.9, 0.1, 0.1], [0.1, 0.5, 0.7, 0.4]] #raw (unscaled) logits
y_pred_softmax = [[0.25, 0.25, 0.25, 0.25], [0.29568115, 0.3267782, 0.17933969, 0.198201], [0.22423635, 0.40858525, 0.18358919, 0.18358919], [0.17655984, 0.26339632, 0.32171297, 0.23833084]] #softmax of y_pred; each row sums to 1
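
# A hedged sketch tying the pieces together (a sanity check, not necessarily the author's original
# computation): the sparse op takes 0-based integer class indices ([3, 3, 2, 0] for the 1-based labels
# above), the non-sparse op takes the one-hot rows, and both are fed the raw logits in y_pred, never
# y_pred_softmax. sklearn's log_loss on the softmax probabilities should give the same mean value.
logits = tf.constant(y_pred, dtype=tf.float32)
onehot_labels = tf.constant(y_true_onehot, dtype=tf.float32)
sparse_labels = tf.constant([3, 3, 2, 0])  #0-based indices corresponding to y_true = [4, 4, 3, 1]

ce_onehot = tf.nn.softmax_cross_entropy_with_logits(labels=onehot_labels, logits=logits)
ce_sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sparse_labels, logits=logits)

with tf.Session() as sess:
    per_example_onehot, per_example_sparse = sess.run([ce_onehot, ce_sparse])
    print(per_example_onehot)          #one cross-entropy value per observation
    print(per_example_sparse)          #identical values, only the label encoding differs
    print(per_example_onehot.mean())   #mean cross entropy over the batch

print(log_loss(y_true_onehot, y_pred_softmax))  #should match the mean above, up to rounding of the softmax values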