## Sample code showing different ways of weight initialization
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

## Input layer
input_layer = Input(shape=(2,))
## RandomUniform generates weights drawn from a uniform distribution over [0, 1)
layer1 = Dense(50, activation='tanh',
               kernel_initializer=tf.keras.initializers.RandomUniform(0, 1))(input_layer)
## He normal initializer: samples from a truncated normal scaled by sqrt(2 / fan_in)
layer2 = Dense(50, activation='relu',
               kernel_initializer=tf.keras.initializers.HeNormal())(layer1)
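## A minimal sketch assembling the layers above into a Model (the sigmoid output
## head below is an assumption for illustration, not part of the original gist):
output_layer = Dense(1, activation='sigmoid')(layer2)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()  ## kernels of layer1/layer2 reflect the chosen initializers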
## Tuning the network architecture with Keras Tuner
import tensorflow as tf
from kerastuner.tuners import RandomSearch

def build_model(hp):
    model = tf.keras.models.Sequential()
    ## the number of hidden layers is itself a hyperparameter
    for i in range(hp.Int('layer_hp', 5, 50)):
        model.add(tf.keras.layers.Dense(
            units=hp.Int('neuron_hp', min_value=32, max_value=256, step=32),  ## number of neurons
            activation=hp.Choice('activation', values=['relu', 'tanh', 'sigmoid'])))  ## activation function
    model.add(tf.keras.layers.Dense(units=2, activation='softmax'))
    ## compile settings assumed for the 2-class softmax head (truncated in the original)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
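## A hedged usage sketch for the imported RandomSearch tuner (x_train/y_train and
## the project name are hypothetical placeholders, not from the original gist):
tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=5,
                     project_name='dense_tuning')
tuner.search(x_train, y_train, epochs=3, validation_split=0.2)
best_model = tuner.get_best_models(num_models=1)[0]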
import numpy

## ReLU function
def relu(z):  ## z is the linearly transformed input
    relu_activation = max(0, z)
    return relu_activation

## derivative of ReLU: 1 for positive inputs, 0 otherwise
def der_relu(z):
    if z > 0:
        return 1
    return 0
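## A vectorized sketch of the same pair so z can be a NumPy array
## (an alternative form, not from the original gist):
def relu_vec(z):
    return numpy.maximum(0, z)  ## elementwise max(0, z)

def der_relu_vec(z):
    return (z > 0).astype(float)  ## 1.0 where z > 0, else 0.0

print(relu_vec(numpy.array([-2.0, 0.0, 3.0])))      ## [0. 0. 3.]
print(der_relu_vec(numpy.array([-2.0, 0.0, 3.0])))  ## [0. 0. 1.]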
import numpy

## tanh function
def tanh(z):  ## z is the linearly transformed input
    ## tanh(z) = (e^z - e^-z) / (e^z + e^-z); the parentheses are required here
    tanh_activation = (numpy.exp(z) - numpy.exp(-z)) / (numpy.exp(z) + numpy.exp(-z))
    return tanh_activation

## derivative of tanh
def der_tanh(z):
    der_tanh = 1 - (tanh(z))**2
    return der_tanh
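## Quick sanity check of the hand-rolled version against NumPy's built-in tanh
## (a verification sketch, not part of the original gist):
z = numpy.linspace(-3, 3, 7)
print(numpy.allclose(tanh(z), numpy.tanh(z)))  ## True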
import numpy

## sigmoid function
def sigmoid(z):  ## z is the linearly transformed input
    sigmoid_activation = 1 / (1 + numpy.exp(-z))
    return sigmoid_activation

## derivative of sigmoid
def der_sigmoid(z):
    der_sig = sigmoid(z) * (1 - sigmoid(z))
    return der_sig
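## Checking der_sigmoid against a central finite difference
## (a verification sketch, not part of the original gist):
z, h = 0.5, 1e-5
approx = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)
print(abs(der_sigmoid(z) - approx) < 1e-8)  ## True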
## Linear transformation (pseudocode):
##   linear_transformed_input = (weight * input) + bias
## Activation function (pseudocode):
##   output_after_applying_activation_function = activation_function(summation((weight * input) + bias))
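## A concrete single-neuron forward pass matching the pseudocode above, reusing the
## sigmoid defined earlier (example weights and bias are made up for illustration):
import numpy
weights = numpy.array([0.4, -0.2])
inputs = numpy.array([1.0, 3.0])
bias = 0.1
z = numpy.dot(weights, inputs) + bias  ## summation((weight * input) + bias)
output_after_applying_activation_function = sigmoid(z)
print(z, output_after_applying_activation_function)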
from sklearn import preprocessing

min_max_scaler = preprocessing.MinMaxScaler()

def min_max(D1, D2, test, column):
    """ Scale a column with MinMaxScaler: fit on the train split (D1) only,
    then transform D1, D2, and test so no test statistics leak into the fit """
    min_max_scaler.fit(D1[column].values.reshape(-1, 1))
    d1_scale = min_max_scaler.transform(D1[column].values.reshape(-1, 1))
    d2_scale = min_max_scaler.transform(D2[column].values.reshape(-1, 1))
    test_scale = min_max_scaler.transform(test[column].values.reshape(-1, 1))
    return d1_scale, d2_scale, test_scale
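## Hedged usage sketch (the train/cv/test DataFrames and the column name are
## hypothetical placeholders, not from the original gist):
train_scaled, cv_scaled, test_scaled = min_max(train, cv, test, 'claim_amount')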
import numpy as np
from prettytable import PrettyTable

def important_feature(model, data):
    """ Print the top-20 most important features of a fitted tree-based model """
    table = PrettyTable(['feature', 'weight'])
    feature = data.columns  ## column names
    feature_importances = model.feature_importances_  ## per-feature weights
    indices = np.argsort(feature_importances)[-20:]  ## indices of the 20 largest weights
    indices = list(indices)[::-1]  ## reorder to descending importance
    print('important features :')
    for i in indices:
        table.add_row([feature[i], np.round(feature_importances[i], 5)])
    print(table)
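## Hedged usage sketch with a fitted tree model (the RandomForestClassifier and
## the X_train/y_train frames are assumptions, not from the original gist):
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
important_feature(rf, X_train)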
from sklearn import preprocessing

min_max_scaler = preprocessing.MinMaxScaler()

def min_max(train, test, column):
    """ Scale a column with MinMaxScaler: fit on train only, then transform both splits """
    min_max_scaler.fit(train[column].values.reshape(-1, 1))
    train_scale = min_max_scaler.transform(train[column].values.reshape(-1, 1))
    test_scale = min_max_scaler.transform(test[column].values.reshape(-1, 1))
    return train_scale, test_scale
## Count dicts for each diagnosis-code column; these columns use 0 where no code
## is present, so the 0 placeholder is explicitly mapped to a count of 0
DiagnosisCode_1_count = final_data['ClmDiagnosisCode_1'].value_counts().to_dict()
DiagnosisCode_1_count[0] = 0
DiagnosisCode_2_count = final_data['ClmDiagnosisCode_2'].value_counts().to_dict()
DiagnosisCode_2_count[0] = 0
DiagnosisCode_3_count = final_data['ClmDiagnosisCode_3'].value_counts().to_dict()
DiagnosisCode_3_count[0] = 0
DiagnosisCode_4_count = final_data['ClmDiagnosisCode_4'].value_counts().to_dict()
DiagnosisCode_4_count[0] = 0
DiagnosisCode_5_count = final_data['ClmDiagnosisCode_5'].value_counts().to_dict()
DiagnosisCode_5_count[0] = 0
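## A hedged sketch of how such count dicts are typically used: mapping each code
## back to its frequency as a numeric feature (the new '_freq' column names are
## assumptions, not from the original gist):
for i, counts in enumerate([DiagnosisCode_1_count, DiagnosisCode_2_count,
                            DiagnosisCode_3_count, DiagnosisCode_4_count,
                            DiagnosisCode_5_count], start=1):
    final_data[f'DiagnosisCode_{i}_freq'] = final_data[f'ClmDiagnosisCode_{i}'].map(counts)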