# Import TensorFlow (and NumPy, which is used later for array conversions)
import tensorflow as tf
import numpy as np
# Declare a constant using tf.constant
sample_constant = tf.constant(20)
print(sample_constant.dtype)
# Now we can perform operations on this constant using TensorFlow
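# A brief illustrative sketch (assumed addition, not in the original gist):
# constants support standard arithmetic operations directly.
doubled = tf.multiply(sample_constant, 2)
print(doubled.numpy())  # 40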
# Similarly, we can also create variables
A1=tf.Variable([1,2,3,4])
print(A1)
# The variable created above can be converted to a NumPy array and printed
print(A1.numpy())
B1=A1.numpy()
B1=tf.Variable(B1)
print(B1)
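# A minimal sketch (assumed addition, not in the original gist): unlike constants,
# tf.Variable objects can be updated in place with assign / assign_add.
counter = tf.Variable([1, 2, 3, 4])
counter.assign_add([10, 10, 10, 10])  # element-wise in-place addition
print(counter.numpy())                # [11 12 13 14]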
#create two tensors
a1=tf.fill([3,3], 7)
a2=tf.fill([3,3], 3)
#Add the two tensors
a3=tf.add(a1,a2)
#print final tensor
print(a3.numpy())
#fill method
c33=tf.fill([3,3], 7)
print(c33.numpy())
#Ones function
A1=tf.ones([3,2], tf.int32)
print(A1.numpy())
#Zeros function
B1=tf.zeros([3,2])
print(B1)
A1=tf.constant([1,2,3,4])
A23=tf.constant([[1,2,3], [4,5,6]])
# Create ones tensors of the same shape and perform element-wise multiplication
B1 = tf.ones_like(A1)
B23 = tf.ones_like(A23)
print(tf.multiply(A1, B1).numpy())    # [1 2 3 4]
print(tf.multiply(A23, B23).numpy())  # [[1 2 3] [4 5 6]]
# Define x
x = tf.Variable(6.0)
# Define y = x * x within an instance of GradientTape
with tf.GradientTape() as gt:
    gt.watch(x)  # optional here, since tf.Variable objects are watched automatically
    y = tf.multiply(x, x)
# Evaluate the gradient dy/dx at x = 6 (expected value: 2 * 6 = 12)
g = gt.gradient(y, x)
print(g.numpy())
''' OUTPUT
12.0
'''
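# A brief sketch (assumed addition, not in the original gist): the same pattern
# works for more complex functions, e.g. y = x**3, whose gradient at x = 2 is
# 3 * x**2 = 12.
x2 = tf.Variable(2.0)
with tf.GradientTape() as tape:
    y2 = x2 ** 3
print(tape.gradient(y2, x2).numpy())  # 12.0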
# Extract mpg and horsepower from the dataset
# (mpg is assumed to be a pandas DataFrame with 'mpg' and 'horsepower' columns,
#  e.g. the classic Auto MPG dataset; loading it is not shown in this gist)
mpg_1 = np.array(mpg['mpg'], np.float32)
horsepower = np.array(mpg['horsepower'], np.float32)
# Define the intercept and slope as trainable variables
intercept = tf.Variable(0.2, dtype=tf.float32)
slope = tf.Variable(0.2, dtype=tf.float32)
# Create a linear regression using y = m*x + b
def linear_regression(intercept, slope, features=horsepower):
    return slope * features + intercept
# Create a loss function (mean squared error between targets and predictions)
def loss_function(intercept, slope, target=mpg_1, features=horsepower):
    pred = linear_regression(intercept, slope, features)
    loss = tf.keras.losses.mse(target, pred)
    return loss
# Create an instance of the Adam optimizer
opt = tf.keras.optimizers.Adam()
# Minimize the loss over a number of epochs
epochs = 1000
for i in range(epochs):
    opt.minimize(lambda: loss_function(intercept, slope), var_list=[intercept, slope])
    print(np.array(loss_function(intercept, slope)))
print(np.array(intercept), np.array(slope))
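# A minimal usage sketch (assumed addition, not in the original gist): once the
# loop has fitted intercept and slope, predictions for new horsepower values can
# be computed with the same linear_regression function.
new_horsepower = np.array([80.0, 120.0, 160.0], np.float32)  # hypothetical inputs
predicted_mpg = linear_regression(intercept, slope, features=new_horsepower)
print(predicted_mpg.numpy())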
# Recap: element-wise multiplication with a ones tensor (contrast with matrix multiplication below)
A1 = tf.ones([3, 2], tf.int32)
B1 = tf.ones_like(A1)
C1 = tf.multiply(A1, B1)
# Matrix multiplication
#create feature value
feat_value=tf.constant([[1,12],[2,13],[3,14]])
''' feat_value is:
tf.Tensor(
[[ 1 12]
 [ 2 13]
 [ 3 14]], shape=(3, 2), dtype=int32)
'''
parameters=tf.constant([[100],[200]])
''' parameters is:
tf.Tensor(
[[100]
 [200]], shape=(2, 1), dtype=int32)
'''
#create predictions by matrix multiplication
pred=tf.matmul(feat_value, parameters)
print(pred)
# We can also reduce a tensor down to a single value
feat_value = tf.constant([[1, 12], [2, 13], [3, 14]])
total = tf.reduce_sum(feat_value)
print(total)
''' OUTPUT
tf.Tensor(45, shape=(), dtype=int32)
'''
# We can also reduce along a specific dimension (axis)
print(tf.reduce_sum(feat_value, 0))
# Reducing along axis 0 sums column-wise
''' OUTPUT
tf.Tensor([ 6 39], shape=(2,), dtype=int32)
'''
print(tf.reduce_sum(feat_value, 1))
# Reducing along axis 1 sums row-wise
''' OUTPUT
tf.Tensor([13 15 17], shape=(3,), dtype=int32)
'''
# We often need to reshape an image so that it can be fed to a dense neural network
# Let's say we have a 28x28 grayscale image
image = tf.random.uniform([28, 28], maxval=255, dtype='int32')
# Flatten it into a 784x1 column vector before feeding it to the network
image_reshape = tf.reshape(image, [28*28, 1])
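# A short sketch (assumed addition, not in the original gist): the same idea
# applies to a whole batch of images; -1 lets TensorFlow infer the batch size.
batch = tf.random.uniform([32, 28, 28], maxval=255, dtype='int32')  # 32 images
batch_flat = tf.reshape(batch, [-1, 28*28])
print(batch_flat.shape)  # (32, 784)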
# Other reduce operations (a short illustrative example follows):
# reduce_all()            - Computes the logical AND of elements across dimensions of a tensor
# reduce_any()            - Computes the logical OR of elements across dimensions of a tensor
# reduce_euclidean_norm() - Computes the Euclidean norm of elements across dimensions of a tensor
# reduce_max()            - Finds the maximum of elements across dimensions of a tensor
# reduce_min()            - Finds the minimum of elements across dimensions of a tensor
# reduce_mean()           - Computes the mean of elements across dimensions of a tensor
# reduce_prod()           - Computes the product of elements across dimensions of a tensor
# reduce_std()            - Computes the standard deviation of elements across dimensions of a tensor
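# A brief illustrative sketch (assumed addition, not in the original gist) showing
# a few of the reduce operations listed above on the same feat_value tensor.
print(tf.reduce_max(feat_value).numpy())      # 14 - largest element
print(tf.reduce_min(feat_value).numpy())      # 1  - smallest element
print(tf.reduce_prod(feat_value, 0).numpy())  # [   6 2184] - column-wise products
print(tf.reduce_mean(tf.cast(feat_value, tf.float32)).numpy())  # 7.5 - mean of all elements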