Shubham Panchal (shubham0204)
import os
import yaml

# Collect question-answer pairs from the YAML files under raw_data/.
dir_path = 'raw_data'
files_list = os.listdir(dir_path + os.sep)
questions = list()
answers = list()
for filepath in files_list:
    with open(dir_path + os.sep + filepath, 'rb') as stream:
        # Assumes the chatterbot-corpus layout: a 'conversations' list of [question, answer, ...] entries per file.
        docs = yaml.safe_load(stream)
        for con in docs['conversations']:
            questions.append(con[0])
            answers.append(con[1])
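The snippets below refer to num_tokens and tokenizer.word_index; a minimal sketch of how they could be produced from the loaded pairs with the standard Keras Tokenizer (the actual preprocessing is not shown in the source):

from tensorflow.keras.preprocessing.text import Tokenizer

# Hypothetical preprocessing: fit a tokenizer over all questions and answers
# so that num_tokens and tokenizer.word_index (used below) are defined.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(questions + answers)
num_tokens = len(tokenizer.word_index) + 1  # +1 for the reserved padding index 0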
import tensorflow as tf

# Encoder: embeds the padded input sequence and keeps the final LSTM states.
encoder_inputs = tf.keras.layers.Input(shape=(None,))
encoder_embedding = tf.keras.layers.Embedding(num_tokens, 200, mask_zero=True)(encoder_inputs)
encoder_outputs, state_h, state_c = tf.keras.layers.LSTM(200, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]

# Decoder: conditioned on the encoder states, predicts the answer token by token.
decoder_inputs = tf.keras.layers.Input(shape=(None,))
decoder_embedding = tf.keras.layers.Embedding(num_tokens, 200, mask_zero=True)(decoder_inputs)
decoder_lstm = tf.keras.layers.LSTM(200, return_state=True, return_sequences=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = tf.keras.layers.Dense(num_tokens, activation='softmax')
output = decoder_dense(decoder_outputs)
model = tf.keras.models.Model([encoder_inputs, decoder_inputs], output)
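Before building the inference models, the combined model would be compiled and trained with teacher forcing; a minimal sketch, assuming padded encoder_input_data / decoder_input_data arrays and one-hot decoder_target_data (placeholder names, not from the source):

model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='categorical_crossentropy')
model.summary()
# encoder_input_data / decoder_input_data are padded index arrays; the targets
# decoder_target_data are the decoder inputs shifted by one and one-hot encoded.
model.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=50, epochs=150)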
def make_inference_models():
    encoder_model = tf.keras.models.Model(encoder_inputs, encoder_states)
    decoder_state_input_h = tf.keras.layers.Input(shape=(200,))
    decoder_state_input_c = tf.keras.layers.Input(shape=(200,))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    # Reuse the trained decoder layers, driven one step at a time by fresh states.
    decoder_outputs, state_h, state_c = decoder_lstm(decoder_embedding, initial_state=decoder_states_inputs)
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = tf.keras.models.Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs, state_h, state_c])
    return encoder_model, decoder_model
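The two models returned above are typically driven in a greedy decoding loop. A sketch under the assumptions that the tokenizer reserves 'start' and 'end' marker tokens and that questions are padded to a fixed length; str_to_tokens and both length constants are hypothetical, not from the source:

import numpy as np
import tensorflow as tf

max_question_len = 22   # hypothetical padding length for questions
max_answer_len = 20     # hypothetical cap on the decoded answer length

def str_to_tokens(sentence):
    # Hypothetical helper: map words to indices, pad to the encoder length.
    tokens = [tokenizer.word_index[w] for w in sentence.lower().split()]
    return tf.keras.preprocessing.sequence.pad_sequences([tokens], maxlen=max_question_len, padding='post')

enc_model, dec_model = make_inference_models()
index_to_word = {index: word for word, index in tokenizer.word_index.items()}

states = enc_model.predict(str_to_tokens('how are you'))
target_seq = np.zeros((1, 1))
target_seq[0, 0] = tokenizer.word_index['start']   # assumes a 'start' marker token
decoded = []
while True:
    output, h, c = dec_model.predict([target_seq] + states)
    sampled = int(np.argmax(output[0, -1, :]))
    word = index_to_word.get(sampled)
    if word == 'end' or word is None or len(decoded) >= max_answer_len:
        break                                       # assumes an 'end' marker token
    decoded.append(word)
    # Feed the sampled token and the updated states back into the decoder.
    target_seq = np.zeros((1, 1))
    target_seq[0, 0] = sampled
    states = [h, c]
print(' '.join(decoded))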
from tensorflow.keras import models, layers, activations, losses, optimizers
import tensorflow.keras.backend as K
import tensorflow as tf

DIMEN = 128  # side length of the square input image
input_shape = ((DIMEN ** 2) * 3,)       # flattened RGB image fed to the network
convolution_shape = (DIMEN, DIMEN, 3)   # shape restored before the conv layers
kernel_size_1 = (4, 4)
kernel_size_2 = (3, 3)
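The constants above suggest a small convolutional branch shared by the two inputs of a siamese network; a minimal sketch of what such a branch could look like, assuming that architecture (the source does not show the layer stack):

# Hypothetical shared branch; the real layer stack is not shown in the source.
branch = models.Sequential([
    layers.Reshape(convolution_shape, input_shape=input_shape),
    layers.Conv2D(32, kernel_size_1, activation=activations.relu),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(64, kernel_size_2, activation=activations.relu),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation=activations.sigmoid),
])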
custom_images = recognizer.prepare_images_from_dir('custom_images/')
class_1_images = recognizer.prepare_images_from_dir('images/p1/')
class_2_images = recognizer.prepare_images_from_dir('images/p2/')
scores = list()
labels = list()
# Compare every custom image against the reference images of each class.
for image in custom_images:
    label = list()
    score = list()
    for sample in class_1_images:
        ...  # truncated in the source: the (image, sample) similarity score goes into `score`
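The loop is truncated in the source; one plausible way it could finish, with similarity() as a hypothetical stand-in for however recognizer scores a pair of images:

import numpy as np

# similarity() is hypothetical; the source cuts off before showing how an
# (image, sample) pair is actually scored by the recognizer.
def predict_class(image):
    class_1_scores = [similarity(image, s) for s in class_1_images]
    class_2_scores = [similarity(image, s) for s in class_2_images]
    # Pick the class whose reference images are most similar on average.
    return 1 if np.mean(class_1_scores) >= np.mean(class_2_scores) else 2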
import tensorflow as tf
from tensorflow.keras import optimizers, losses, activations
from tensorflow.keras.layers import *

dropout_rate = 0.5
input_shape = (maxlen,)      # padded token sequence
target_shape = (maxlen, 1)
# Note: activations.leaky_relu here is a custom implementation; it does not
# exist in the official TensorFlow build.
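For reference, a minimal sketch of such a custom leaky ReLU written with the Keras backend (recent TensorFlow builds expose tf.nn.leaky_relu, which serves the same purpose):

import tensorflow.keras.backend as K

def leaky_relu(x, alpha=0.3):
    # Identity for positive inputs, a small linear slope for negative ones.
    return K.maximum(x, alpha * x)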
import json

# Export the tokenizer's vocabulary for the Android app to consume.
with open('android/word_dict.json', 'w') as file:
    json.dump(tokenizer.word_index, file)
import tensorflow as tf

# Convert the trained Keras model to a quantized TFLite flatbuffer (TF 1.x API).
converter = tf.lite.TFLiteConverter.from_keras_model_file('models/model.h5')
converter.post_training_quantize = True
tflite_buffer = converter.convert()
open('android/model.tflite', 'wb').write(tflite_buffer)
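A quick sanity check of the converted flatbuffer with the TFLite interpreter; this check is an addition, not part of the source:

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='android/model.tflite')
interpreter.allocate_tensors()
# Feed zeros of the declared input shapes and read back the output shape.
for detail in interpreter.get_input_details():
    interpreter.set_tensor(detail['index'], np.zeros(detail['shape'], dtype=detail['dtype']))
interpreter.invoke()
print(interpreter.get_tensor(interpreter.get_output_details()[0]['index']).shape)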
private fun loadJSONFromAsset(filename: String?): String? {
    var json: String? = null
    try {
        // Read the bundled asset (e.g. word_dict.json) into a string.
        val inputStream = context!!.assets.open(filename!!)
        val size = inputStream.available()
        val buffer = ByteArray(size)
        inputStream.read(buffer)
        inputStream.close()
        json = String(buffer)
    } catch (e: java.io.IOException) {
        e.printStackTrace()
        return null
    }
    return json
}
fun tokenize(message: String): IntArray {
    val parts: List<String> = message.split(" ")
    val tokenizedMessage = ArrayList<Int>()
    for (part in parts) {
        if (part.trim() != "") {
            var index: Int? = 0
            if (vocabData!![part] == null) {
                // Out-of-vocabulary words map to the padding index 0.
                index = 0
            } else {
                index = vocabData!![part]
            }
            tokenizedMessage.add(index!!)
        }
    }
    return tokenizedMessage.toIntArray()
}