Pranjal Saxena (pranjalAI)

@pranjalAI
pranjalAI / detect_object.py
Created September 3, 2020 15:18
This code returns the detected item as output
import tensorflow as tf

# Let TensorFlow grow GPU memory on demand instead of grabbing it all at once.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
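
The preview stops at the imports. A minimal sketch of how a SavedModel-based YOLOv4 detector like this one is typically loaded and run; the checkpoint path, the 416x416 input size, and the detect() wrapper are assumptions, not the gist's actual code:

import numpy as np
import cv2

def detect(image_path, weights='./checkpoints/yolov4-416', input_size=416):
    # Load the exported YOLOv4 SavedModel and grab its serving signature.
    saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])
    infer = saved_model_loaded.signatures['serving_default']
    # Resize and normalise the image to the network's expected input.
    original_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    image_data = cv2.resize(original_image, (input_size, input_size)) / 255.0
    batch = tf.constant(image_data[np.newaxis, ...].astype(np.float32))
    # The signature returns a dict of tensors: boxes plus per-class confidences.
    return infer(batch)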
@pranjalAI
pranjalAI / app.py
Created September 3, 2020 15:28
Passing the detected item to a webpage
# coding=utf-8
from __future__ import division, print_function

import os
import re
import sys
import glob
import json
import pickle
import cv2
import yaml
import numpy as np
import pandas as pd
import tensorflow as tf
from shutil import copyfile
from tensorflow.keras import layers, activations, models, preprocessing, utils
from tensorflow.keras.callbacks import ModelCheckpoint

import detect_object  # detection helper from the detect_object.py gist above
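
The preview again ends at the imports. A minimal sketch of how app.py might hand the detection result to a webpage, assuming a Flask app and a hypothetical detect_object.detect() helper (neither is confirmed by the preview):

from flask import Flask, request, render_template

app = Flask(__name__)

@app.route('/', methods=['GET', 'POST'])
def index():
    detected = None
    if request.method == 'POST':
        # Save the uploaded image, then run the detector on it.
        f = request.files['image']
        path = os.path.join('uploads', f.filename)
        f.save(path)
        detected = detect_object.detect(path)  # hypothetical helper
    return render_template('index.html', detected=detected)

if __name__ == '__main__':
    app.run(debug=True)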
@pranjalAI
pranjalAI / chunks.py
Last active February 10, 2024 08:25
import re
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import preprocessing, utils

questions_for_token = list()
answers_for_token = list()

# docs is loaded earlier in the gist: one tab-separated
# "question<TAB>answer" pair per line; record 2868 is skipped
# (presumably a malformed row in the source data).
c = 1
for con in docs:
    if c != 2868:
        con = con.strip().split("\t")
        questions_for_token.append(con[0])
        answers_for_token.append(con[1])
    c += 1
def processTweet(chat):
    # Normalise a chat line: lowercase; strip URLs, @mentions, extra
    # whitespace, hashtag symbols, and punctuation.
    chat = chat.lower()
    chat = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', '', chat)
    chat = re.sub(r'@[^\s]+', '', chat)
    chat = re.sub(r'[\s]+', ' ', chat)
    chat = re.sub(r'#([^\s]+)', r'\1', chat)
    chat = re.sub(r'[\.!:\?\-\'\"\\/]', '', chat)
    chat = chat.strip('\'"')
    return chat
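
# Example (not in the gist):
#   processTweet("Check https://example.com #chatbot @user Hello!")
#   -> 'check chatbot hello'   (URL and mention stripped, hashtag and punctuation removed)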
def emb_mat(nb_words):
    # Build an embedding matrix from pre-trained GloVe vectors; words not in
    # GloVe get random vectors with the same mean/std as the known embeddings.
    # (The fill-in after emb_mean/emb_std follows the usual GloVe recipe; the
    # preview cuts off at that point.)
    EMBEDDING_FILE = "glove.6B.100d.txt"
    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')
    embeddings_index = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE, encoding="utf8"))
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    embed_size = all_embs.shape[1]
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():  # word_index: tokenizer vocab (global, set below)
        if i < nb_words and word in embeddings_index:
            embedding_matrix[i] = embeddings_index[word]
    return embedding_matrix
def tokenized_data(questions, answers, VOCAB_SIZE, tokenizer):
    # encoder_input_data: tokenize the questions and pad them to equal length.
    tokenized_questions = tokenizer.texts_to_sequences(questions)
    maxlen_questions = max([len(x) for x in tokenized_questions])
    padded_questions = preprocessing.sequence.pad_sequences(
        tokenized_questions, maxlen=maxlen_questions, padding='post')
    encoder_input_data = np.array(padded_questions)
    # print(encoder_input_data.shape, maxlen_questions)
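    # decoder_input_data / decoder_output_data: the preview cuts off here; a
    # sketch of the decoder side mirroring the encoder preparation above
    # (standard seq2seq recipe, not the gist's exact code):
    tokenized_answers = tokenizer.texts_to_sequences(answers)
    maxlen_answers = max([len(x) for x in tokenized_answers])
    padded_answers = preprocessing.sequence.pad_sequences(
        tokenized_answers, maxlen=maxlen_answers, padding='post')
    decoder_input_data = np.array(padded_answers)
    # Targets are the same sequences shifted left one step, one-hot encoded.
    for i in range(len(tokenized_answers)):
        tokenized_answers[i] = tokenized_answers[i][1:]
    padded_answers = preprocessing.sequence.pad_sequences(
        tokenized_answers, maxlen=maxlen_answers, padding='post')
    decoder_output_data = np.array(utils.to_categorical(padded_answers, VOCAB_SIZE))
    return encoder_input_data, decoder_input_data, decoder_output_data, maxlen_answers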
def prepare_data(questions, answers):
    # Clean the questions, then build the tokenized training arrays.
    answers = pd.DataFrame(answers, columns=["Ans"])
    questions = pd.DataFrame(questions, columns=["Question"])
    # getFeatureVector is defined elsewhere in the gist (a cleaner along the
    # lines of processTweet above).
    questions["TokQues"] = questions["Question"].apply(getFeatureVector)
    answers = np.array(answers["Ans"])
    questions = np.array(questions["TokQues"])
    answers_with_tags = list()
    for i in range(len(answers)):
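        # The preview cuts off inside this loop; a hedged sketch of the
        # remainder, following the standard seq2seq-chatbot recipe
        # (the <START>/<END> tags and tokenizer settings are assumptions):
        answers_with_tags.append('<START> ' + str(answers[i]) + ' <END>')
    tokenizer = preprocessing.text.Tokenizer(filters='')
    tokenizer.fit_on_texts(list(questions) + answers_with_tags)
    word_index = tokenizer.word_index
    nb_words = len(word_index) + 1
    (encoder_input_data, decoder_input_data,
     decoder_output_data, maxlen_answers) = tokenized_data(questions, answers_with_tags, nb_words, tokenizer)
    return (encoder_input_data, decoder_input_data, decoder_output_data,
            maxlen_answers, nb_words, word_index, tokenizer)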
# Unpack everything prepare_data returns, then build the GloVe matrix.
(encoder_input_data, decoder_input_data, decoder_output_data,
 maxlen_answers, nb_words, word_index, tokenizer) = prepare_data(questions_for_token, answers_for_token)
embedding_matrix = emb_mat(nb_words)
encoder_inputs = tf.keras.layers.Input(shape=(None,))
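
# The preview elides the rest of the training graph; a sketch of the standard
# seq2seq model it implies (100-dim GloVe embeddings, 200-unit LSTMs to match
# the 200-dim states used below; assumed, not the gist's exact code):
encoder_embedding = tf.keras.layers.Embedding(
    nb_words, 100, weights=[embedding_matrix], trainable=False)(encoder_inputs)
_, state_h, state_c = tf.keras.layers.LSTM(200, return_state=True)(encoder_embedding)
encoder_states = [state_h, state_c]

decoder_inputs = tf.keras.layers.Input(shape=(None,))
decoder_embedding = tf.keras.layers.Embedding(nb_words, 100)(decoder_inputs)
decoder_lstm = tf.keras.layers.LSTM(200, return_state=True, return_sequences=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = tf.keras.layers.Dense(nb_words, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

model = tf.keras.models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')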
def make_inference_models():
    # Encoder model: maps an input sequence to its final LSTM states.
    encoder_model = tf.keras.models.Model(encoder_inputs, encoder_states)
    # Decoder model: one decoding step, seeded with the previous states.
    decoder_state_input_h = tf.keras.layers.Input(shape=(200,))
    decoder_state_input_c = tf.keras.layers.Input(shape=(200,))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    decoder_outputs, state_h, state_c = decoder_lstm(
        decoder_embedding, initial_state=decoder_states_inputs)
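    # The preview ends mid-call; a hedged completion using the names from the
    # sketch above (standard recipe, not the gist's exact code):
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = tf.keras.models.Model(
        [decoder_inputs] + decoder_states_inputs,
        [decoder_outputs] + decoder_states)
    return encoder_model, decoder_model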