// Compute gradients of the MSE loss with respect to the weights and the bias for one sample.
private fun calculateGradients( inputs : DoubleArray , predY : Double , targetY : Double ) : Array<Any> {
    val dJ_dPred = meanSquaredErrorDerivative( predY , targetY )
    // d( pred )/d( W ) = inputs , so dJ/dW = dJ/dPred * inputs ( chain rule ).
    val dPred_dW = inputs
    val dJ_dW = MathOps.multiplyScalar( dPred_dW , dJ_dPred )
    // d( pred )/d( b ) = 1 , so dJ/db = dJ/dPred.
    val dJ_dB = dJ_dPred
    return arrayOf( dJ_dW , dJ_dB )
}

// Derivative of the squared error J = ( predY - targetY )^2 with respect to predY.
private fun meanSquaredErrorDerivative( predY : Double , targetY : Double ) : Double {
    return 2 * ( predY - targetY )
}
// Average the per-sample gradients over the batch and take one gradient-descent step.
private fun optimizeParameters( gradients : ArrayList<Array<Any>> , learningRate : Double ) {
    val weightGradientsList = ArrayList<DoubleArray>()
    for ( gradient in gradients ) {
        weightGradientsList.add( gradient[0] as DoubleArray )
    }
    val weightGradients = MathOps.multidimMean( weightGradientsList.toTypedArray() ).toDoubleArray()
    val biasGradientsList = ArrayList<Double>()
    for ( gradient in gradients ) {
        biasGradientsList.add( gradient[1] as Double )
    }
    val biasGradient = biasGradientsList.average()
    // Update rule: parameter -= learningRate * gradient ( `weights` and `bias` are the model's fields ).
    for ( i in weights.indices ) {
        weights[ i ] -= learningRate * weightGradients[ i ]
    }
    bias -= learningRate * biasGradient
}
// Train the model with mini-batch gradient descent.
fun fit( x : Array<DoubleArray> , y : DoubleArray , epochs : Int , batchSize : Int ) {
    val batches = batch( x , y , batchSize )
    for ( e in 0 until epochs ) {
        for ( batch in batches ) {
            val gradients = ArrayList<Array<Any>>()
            for ( pair in batch ) {
                val predictions = forwardPropogate( pair.first )
                gradients.add( calculateGradients( pair.first , predictions , pair.second ) )
            }
            optimizeParameters( gradients , 0.001 )
        }
    }
}
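For reference, here is how fit() could be invoked on a small synthetic dataset. This is only a sketch: the class name LinearRegressionModel, its constructor, and the numFeatures parameter are assumptions, since the gist only shows the training methods and the batch() helper is not included.

// A minimal usage sketch, assuming the methods above live in a model class
// ( `LinearRegressionModel` and its constructor are hypothetical names ).
val x = arrayOf(
    doubleArrayOf( 1.0 , 2.0 ) ,
    doubleArrayOf( 2.0 , 3.0 ) ,
    doubleArrayOf( 3.0 , 4.0 ) ,
    doubleArrayOf( 4.0 , 5.0 )
)
val y = doubleArrayOf( 5.0 , 8.0 , 11.0 , 14.0 )   // y = 2*x1 + x2 + 1
val model = LinearRegressionModel( numFeatures = 2 )
model.fit( x , y , epochs = 100 , batchSize = 2 )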
import tensorflow as tf

# Keras model: a trainable Embedding layer followed by a 1D convolutional stack and a softmax classifier.
# input_shape is given explicitly on the first Conv1D so the convolutional stack can later be
# rebuilt without the Embedding layer ( see the conversion step below ).
model_layers = [
    tf.keras.layers.Embedding( vocab_len + 1 , output_dim=50 , input_length=input_length ) ,
    tf.keras.layers.Conv1D( 32 , kernel_size=5 , activation="relu" , strides=1 , input_shape=( input_length , 50 ) ) ,
    tf.keras.layers.Conv1D( 64 , kernel_size=5 , activation="relu" , strides=1 ) ,
    tf.keras.layers.MaxPool1D( pool_size=4 , strides=1 ) ,
    tf.keras.layers.Flatten() ,
    tf.keras.layers.Dense( 256 , activation="relu" ) ,
    tf.keras.layers.Dropout( 0.5 ) ,
    tf.keras.layers.Dense( 2 , activation="softmax" )
]
# `model` is the trained tf.keras.Sequential built from `model_layers` above.
# Drop the Embedding layer ( index 0 ) so that embedding lookups can be done on-device,
# then convert the remaining layers to a quantized TensorFlow Lite model.
new_model = tf.keras.Sequential( model.layers[ 1 : ] )
new_model.save( 'android/no_embedding_model.h5' )
converter = tf.lite.TFLiteConverter.from_keras_model_file( 'android/no_embedding_model.h5' )
converter.post_training_quantize = True
tflite_buffer = converter.convert()
open( 'android/model.tflite' , 'wb' ).write( tflite_buffer )
import pickle

# ( 1 ) Load the trained model and pull the learned embedding matrix out of the first layer.
model = tf.keras.models.load_model( 'models/model.h5' )
embedding_matrix = model.layers[0].get_weights()[0]
print( 'Embedding Shape ~> {}'.format( embedding_matrix.shape ) )

# ( 2 ) Load the fitted Keras Tokenizer and invert its word_index so that
# indices ( rows of embedding_matrix ) map back to words.
word_index : dict = pickle.load( open( 'glove_embedding/tokenizer.pkl' , 'rb' ) ).word_index
word_index_2 = dict()
for word , index in word_index.items():
    word_index_2[ index ] = word
interface VocabCallback {
    fun onDataProcessed( result : HashMap<String, DoubleArray>? )
}

private inner class LoadVocabularyTask( private var callback : VocabCallback? ) : AsyncTask<String, Void, HashMap<String, DoubleArray>?>() {
    // Parse the word -> embedding-vector JSON ( passed as params[0] ) off the UI thread.
    override fun doInBackground( vararg params : String? ) : HashMap<String, DoubleArray>? {
        val jsonObject = JSONObject( params[0] )
        val data = HashMap<String, DoubleArray>()
        for ( word in jsonObject.keys() ) {
            val vector = jsonObject.getJSONArray( word )
            data[ word ] = DoubleArray( vector.length() ) { vector.getDouble( it ) }
        }
        return data
    }
    // Deliver the parsed vocabulary to the caller on the main thread.
    override fun onPostExecute( result : HashMap<String, DoubleArray>? ) = callback?.onDataProcessed( result ) ?: Unit
}
// Convert a message into a sequence of embedding vectors, one per token.
fun tokenize( message : String ) : Array<DoubleArray> {
    val tokens : List<String> = Tokenizer.getTokens( message ).toList()
    val tokenizedMessage = ArrayList<DoubleArray>()
    for ( part in tokens ) {
        var vector : DoubleArray? = null
        if ( embeddingData!![ part ] == null ) {
            // Out-of-vocabulary words map to a zero vector of length embeddingDim.
            vector = DoubleArray( embeddingDim!! ) { 0.0 }
        }
        else {
            vector = embeddingData!![ part ]
        }
        tokenizedMessage.add( vector!! )
    }
    return tokenizedMessage.toTypedArray()
}
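The convolutional model expects a fixed number of time steps, so the output of tokenize() has to be padded or truncated to the input_length used during training before it is fed to the interpreter. A minimal sketch of that step, assuming zero vectors are appended at the end and that input_length and embeddingDim are available on the Android side (both the helper name padSequence and the padding scheme are assumptions, not shown in the gist):

// Pad / truncate the tokenized message to a fixed length ( hypothetical helper;
// appends zero vectors when the message is shorter than inputLength ).
fun padSequence( tokenized : Array<DoubleArray> , inputLength : Int , embeddingDim : Int ) : Array<DoubleArray> {
    return Array( inputLength ) { i ->
        if ( i < tokenized.size ) tokenized[ i ] else DoubleArray( embeddingDim ) { 0.0 }
    }
}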
@Throws(IOException::class)
private fun loadModelFile() : MappedByteBuffer {
    val MODEL_ASSETS_PATH = "model.tflite"
    val assetFileDescriptor = assets.openFd( MODEL_ASSETS_PATH )
    val fileInputStream = FileInputStream( assetFileDescriptor.fileDescriptor )
    val fileChannel = fileInputStream.channel
    val startOffset = assetFileDescriptor.startOffset
    val declaredLength = assetFileDescriptor.declaredLength
    return fileChannel.map( FileChannel.MapMode.READ_ONLY , startOffset , declaredLength )
}
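The MappedByteBuffer returned by loadModelFile() can then be handed to a TensorFlow Lite Interpreter (org.tensorflow.lite.Interpreter). A minimal sketch, assuming the converted model takes a ( 1 , inputLength , embeddingDim ) float input and produces a ( 1 , 2 ) softmax output; the inputLength value and the padSequence helper from the sketch above are assumptions:

// Run the converted model on one tokenized, padded message ( a sketch, not the gist's exact code ).
val interpreter = Interpreter( loadModelFile() )
val paddedMessage = padSequence( tokenize( message ) , inputLength , embeddingDim!! )
// TFLite expects float tensors of shape ( 1 , inputLength , embeddingDim ).
val input = arrayOf( Array( inputLength ) { i -> FloatArray( embeddingDim!! ) { j -> paddedMessage[ i ][ j ].toFloat() } } )
val output = arrayOf( FloatArray( 2 ) )
interpreter.run( input , output )
// output[0][0] and output[0][1] hold the two class probabilities.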