Skip to content

Instantly share code, notes, and snippets.

View shubham0204's full-sized avatar
🎯
Focusing

Shubham Panchal shubham0204

🎯
Focusing
View GitHub Profile
// Classifies `text` as CLASS_POSITIVE or CLASS_NEGATIVE by comparing its
// probability under the positive and negative bag-of-words models.
// Fix: the original had no return statement on the tie path
// (equal probabilities), which does not compile for an Int-returning
// Kotlin function. Ties now resolve to CLASS_POSITIVE.
fun classifyText( text : String ) : Int {
    val positiveProbability = findProbabilityGivenSample( text , positiveBagOfWords )
    val negativeProbability = findProbabilityGivenSample( text , negativeBagOfWords )
    return if ( positiveProbability >= negativeProbability ) CLASS_POSITIVE else CLASS_NEGATIVE
}
class Classifier( private var positiveBagOfWords : Array<String> , private var negativeBagOfWords : Array<String>) {
companion object {
val CLASS_POSITIVE = 0
val CLASS_NEGATIVE = 1
private val englishStopWords = arrayOf(
"i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours", "yourself", "yourselves",
"he", "him", "his", "himself", "she", "her", "hers", "herself", "it", "its", "itself", "they", "them", "their",
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
# Load the graduate-admission dataset from disk (CSV, one row per applicant).
data = pd.read_csv( 'graduate_admission_prediction/Admission_Predict_Ver1.1.csv' )
# Notebook remnant: shows the first rows in a notebook; has no effect in a plain script.
data.head()
# Crude normalization: scale all continuous columns by a common factor of 100.
# NOTE: the column name 'LOR ' keeps its trailing space — it matches the CSV header exactly.
continuous_features = data[ ['GRE Score','TOEFL Score','University Rating','SOP','LOR ','CGPA'] ].values / 100
# The binary 'Research' column is kept separate as a categorical feature.
categorical_research_features = data[ [ 'Research' ] ].values
def mean_squared_error( Y , y_pred ):
    """Mean squared error: the mean of the squared residuals (y_pred - Y)."""
    squared_residuals = tf.square( y_pred - Y )
    return tf.reduce_mean( squared_residuals )
def mean_squared_error_deriv( Y , y_pred ):
    """Scalar mean of the MSE derivative 2*(y_pred - Y), reshaped to a 1x1 tensor."""
    residuals = y_pred - Y
    mean_gradient = tf.reduce_mean( 2 * residuals )
    return tf.reshape( mean_gradient , [ 1 , 1 ] )
def h ( X , weights , bias ):
    """Linear hypothesis: the affine map X . weights + bias."""
    weighted_sum = tf.tensordot( X , weights , axes=1 )
    return weighted_sum + bias
# ---- Training hyper-parameters ----
num_epochs = 10
# NOTE(review): X and Y are not defined anywhere in this snippet — presumably
# assembled from continuous_features / categorical_research_features in code
# outside this view; verify against the full source.
num_samples = X.shape[0]
batch_size = 10
learning_rate = 0.001
# Build the input pipeline: shuffle with a 500-element buffer, repeat the data
# once per epoch, and group samples into batches.
dataset = tf.data.Dataset.from_tensor_slices(( X , Y ))
dataset = dataset.shuffle( 500 ).repeat( num_epochs ).batch( batch_size )
iterator = dataset.__iter__()
# Initialize the weight vector from a standard normal; bias starts at zero.
num_features = X.shape[1]
weights = tf.random.normal( ( num_features , 1 ) )
bias = 0
# Accumulators for plotting the loss curve after training.
epochs_plot = list()
loss_plot = list()
# Epoch loop.
# NOTE(review): this snippet is visibly truncated — the per-batch
# gradient-descent body of the loop is missing, and the original indentation
# was stripped by the page scrape. Restore both from the full source before
# running; as written this is not valid Python.
for i in range( num_epochs ) :
epoch_loss = list()
# Evaluate on the held-out split. test_X / test_Y are defined outside this snippet.
output = h( test_X , weights , bias )
labels = test_Y
# Report mean absolute error as the evaluation metric.
accuracy_op = tf.metrics.MeanAbsoluteError()
accuracy_op.update_state( labels , output )
print( 'Mean Absolute Error = {}'.format( accuracy_op.result().numpy() ) )
// Parameters of a linear model. This is the interior of a class whose header
// (and the declaration of numFeatures) lies outside this snippet.
private var weights : DoubleArray
private var bias = 0.0
// numFeatures is a value of type Int and refers to the number of features.
// It is the length of the X vector.
init {
val random = Random()
// NOTE(review): this local `val weights` SHADOWS the `weights` property above,
// so the property is never initialized by this block — almost certainly a bug.
// The init body is truncated here; confirm against the full source.
val weights = DoubleArray(numFeatures)
for (x in 0 until numFeatures) {
private fun batch ( x : Array<DoubleArray> , y : DoubleArray , batchSize : Int ) : List<List<Pair<DoubleArray,Double>>> {
    // Pair every sample vector with its label, then split the pairs into
    // groups of `batchSize` elements (the final group may be smaller).
    return x.zip( y.toTypedArray() ).chunked( batchSize )
}
private fun forwardPropogate( x : DoubleArray ) : Double {
    // Linear forward pass: w . x + b.
    // (The historical misspelling of "propagate" is kept so callers are unaffected.)
    val weightedSum = MathOps.dot( this.weights , x )
    return weightedSum + this.bias
}