Prakhar Mishra (prakhar21)

🙇‍♂️
Working
View GitHub Profile
prakhar21 / trie_longest_prefix.py
Created February 4, 2021 07:05
Trie Longest Prefix in Python
class TrieNode:
    def __init__(self):
        self.child = {}
        self.last = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, data):
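The preview above cuts off at insert(). A minimal self-contained sketch of the idea, assuming "longest prefix" here means the longest common prefix of all inserted words; the method name longest_common_prefix and the demo words are illustrative, not from the gist:

class TrieNode:
    def __init__(self):
        self.child = {}    # character -> TrieNode
        self.last = False  # True if a word ends at this node

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, data):
        node = self.root
        for ch in data:
            node = node.child.setdefault(ch, TrieNode())
        node.last = True

    def longest_common_prefix(self):
        # Walk down while exactly one branch exists and no word ends early.
        prefix, node = "", self.root
        while len(node.child) == 1 and not node.last:
            ch, node = next(iter(node.child.items()))
            prefix += ch
        return prefix

t = Trie()
for word in ("flower", "flow", "flight"):
    t.insert(word)
print(t.longest_common_prefix())  # -> "fl"

The walk stops as soon as a node branches or a shorter word ends, which is exactly where the shared prefix stops.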
prakhar21 / trie_prefix_search.py
Created February 3, 2021 17:08
Trie Prefix Search in Python
class TrieNode:
    def __init__(self):
        self.child = {}
        self.last = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, data):
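The prefix-search preview stops before any search logic appears. A simplified stand-in using a plain nested-dict trie (the function names and the "$" end-of-word marker are illustrative, not the gist's class-based code) that returns every stored word beginning with a given prefix:

def insert(root, word):
    node = root
    for ch in word:
        node = node.setdefault(ch, {})
    node["$"] = True  # end-of-word marker

def words_with_prefix(root, prefix):
    # Walk to the node that represents the prefix, then collect every word below it.
    node = root
    for ch in prefix:
        if ch not in node:
            return []
        node = node[ch]
    results = []

    def collect(n, path):
        if "$" in n:
            results.append(prefix + path)
        for ch, child in n.items():
            if ch != "$":
                collect(child, path + ch)

    collect(node, "")
    return results

trie = {}
for word in ("car", "card", "care", "dog"):
    insert(trie, word)
print(words_with_prefix(trie, "car"))  # -> ['car', 'card', 'care']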
prakhar21 / trie_search.py
Created February 3, 2021 16:29
Trie Search in Python
class TrieNode:
    def __init__(self):
        self.child = {}
        self.last = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, data):
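As above, the search body itself is not visible in the preview. A minimal dict-based sketch of trie membership search, with the function names and "$" marker assumed for illustration:

def insert(root, word):
    node = root
    for ch in word:
        node = node.setdefault(ch, {})
    node["$"] = True  # end-of-word marker

def search(root, word):
    # Follow the word character by character; it counts as present only if
    # the final node carries the end-of-word marker.
    node = root
    for ch in word:
        if ch not in node:
            return False
        node = node[ch]
    return "$" in node

trie = {}
for word in ("the", "there", "their"):
    insert(trie, word)
print(search(trie, "there"))  # -> True
print(search(trie, "thei"))   # -> False (only a prefix, not a stored word)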
prakhar21 / trie_insert.py
Last active February 3, 2021 16:17
Trie Insert in Python
class TrieNode:
    def __init__(self):
        self.child = {}
        self.last = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, data):
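The preview stops exactly where insert() begins. One plausible completion of that method on the skeleton shown; the body below is a reconstruction for illustration, not the gist's actual code:

class TrieNode:
    def __init__(self):
        self.child = {}    # character -> TrieNode
        self.last = False  # True if a word ends here

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, data):
        # Create a node per character as needed, then mark the word end.
        node = self.root
        for ch in data:
            if ch not in node.child:
                node.child[ch] = TrieNode()
            node = node.child[ch]
        node.last = True

t = Trie()
for word in ("hi", "hello"):
    t.insert(word)
print(t.root.child["h"].child["i"].last)  # -> True: a word ends at 'i'
print(t.root.child["h"].child["e"].last)  # -> False: "he" was never inserted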
prakhar21 / flair_sentence_train.py
Created January 19, 2020 17:26
Flair Sentence Embeddings and Training a Classifier
document_embeddings = DocumentRNNEmbeddings(word_embeddings, hidden_size=512, reproject_words=True,
                                            reproject_words_dimension=256, rnn_type='LSTM',
                                            rnn_layers=1, bidirectional=False)
classifier = TextClassifier(document_embeddings, label_dictionary=corpus.make_label_dictionary(),
                            multi_label=False)
trainer = ModelTrainer(classifier, corpus)
trainer.train('./model', max_epochs=20, patience=5, mini_batch_size=32, learning_rate=0.1)
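The lines above assume a word_embeddings stack and a labelled corpus defined elsewhere. A sketch of that surrounding setup, assuming the Flair API of roughly that era (~0.4.x); the data folder and file names are placeholders:

# FastText-format files: each line is "__label__<class> <text>"
from flair.datasets import ClassificationCorpus
from flair.embeddings import WordEmbeddings, FlairEmbeddings, DocumentRNNEmbeddings
from flair.models import TextClassifier
from flair.trainers import ModelTrainer

corpus = ClassificationCorpus('./data', train_file='train.csv', dev_file='dev.csv', test_file='test.csv')
word_embeddings = [WordEmbeddings('glove'), FlairEmbeddings('news-forward')]

document_embeddings = DocumentRNNEmbeddings(word_embeddings, hidden_size=512, reproject_words=True,
                                            reproject_words_dimension=256, rnn_type='LSTM',
                                            rnn_layers=1, bidirectional=False)
classifier = TextClassifier(document_embeddings, label_dictionary=corpus.make_label_dictionary(),
                            multi_label=False)
trainer = ModelTrainer(classifier, corpus)
trainer.train('./model', max_epochs=20, patience=5, mini_batch_size=32, learning_rate=0.1)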
prakhar21 / flair_embeddings.py
Created January 19, 2020 17:22
Flair Embeddings
def initialize_embeddings():
    """
    Summary:
        Stacks the pre-trained embeddings to be concatenated into a single word representation.
    Return:
        list: List of pre-trained embedding objects.
    """
    word_embeddings = [
        WordEmbeddings('glove'),
        FlairEmbeddings('news-forward'),
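The embedding list is cut off mid-way in the preview. A plausible self-contained completion, assuming the forward Flair language model is paired with its backward counterpart, as is conventional:

from flair.embeddings import WordEmbeddings, FlairEmbeddings

def initialize_embeddings():
    """Return the list of pre-trained embeddings to be stacked (concatenated) per word."""
    word_embeddings = [
        WordEmbeddings('glove'),
        FlairEmbeddings('news-forward'),
        FlairEmbeddings('news-backward'),  # assumed: the preview is truncated before this point
    ]
    return word_embeddings

word_embeddings = initialize_embeddings()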
prakhar21 / segment_data.py
Created January 19, 2020 17:03
Data Segmentation
def segment_data(data_file):
    try:
        import pandas as pd
    except ImportError:
        raise
    data = pd.read_csv(data_file, encoding='latin-1').sample(frac=1).drop_duplicates()
    data = data[['classes', 'title']].rename(columns={"classes": "label", "title": "text"})
    data['label'] = '__label__' + data['label'].astype(str)
    data['text'] = data['text'].apply(lambda k: k.lower().strip())
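The preview ends before the actual segmentation step. A plausible continuation that splits the shuffled frame into train/dev/test files in the FastText format ("__label__<class> <text>") the Flair snippets above expect; the 80/10/10 ratios, file names, and output directory are assumptions:

import os
import pandas as pd

def segment_data(data_file, out_dir='./data'):
    # Same preprocessing as in the preview above.
    data = pd.read_csv(data_file, encoding='latin-1').sample(frac=1).drop_duplicates()
    data = data[['classes', 'title']].rename(columns={"classes": "label", "title": "text"})
    data['label'] = '__label__' + data['label'].astype(str)
    data['text'] = data['text'].apply(lambda k: k.lower().strip())

    # Assumed 80/10/10 split, written one "__label__<class> <text>" line per example.
    os.makedirs(out_dir, exist_ok=True)
    n = len(data)
    splits = {'train.csv': data[:int(0.8 * n)],
              'dev.csv': data[int(0.8 * n):int(0.9 * n)],
              'test.csv': data[int(0.9 * n):]}
    for name, split in splits.items():
        with open(os.path.join(out_dir, name), 'w') as fp:
            for _, row in split.iterrows():
                fp.write(f"{row['label']} {row['text']}\n")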
prakhar21 / locustfile.py
Created February 16, 2019 12:15
Locust Load Testing
import random
import os
from locust import HttpLocust, TaskSet, task

TEST_DATA_PATH = 'test.csv'

def load_test_sentences():
    utterances = []
    with open(TEST_DATA_PATH, 'r') as fp:
        for row in fp:
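The file-reading loop is truncated in the preview. A sketch of a complete locustfile in the pre-1.0 Locust style that the HttpLocust import implies (HttpLocust and TaskSet were removed in Locust 1.0); the /predict endpoint and JSON payload shape are assumptions, not from the gist:

import random
from locust import HttpLocust, TaskSet, task

TEST_DATA_PATH = 'test.csv'

def load_test_sentences():
    utterances = []
    with open(TEST_DATA_PATH, 'r') as fp:
        for row in fp:
            utterances.append(row.strip())
    return utterances

UTTERANCES = load_test_sentences()

class UserBehaviour(TaskSet):
    @task
    def predict(self):
        # Endpoint and payload are assumptions, not from the gist.
        self.client.post('/predict', json={'text': random.choice(UTTERANCES)})

class LoadTest(HttpLocust):
    task_set = UserBehaviour
    min_wait = 1000  # milliseconds between tasks
    max_wait = 3000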
prakhar21 / map.py
Created January 26, 2019 06:42
GeoSpatial Data Visualisation using Folium in Python
import folium
from folium.plugins import MarkerCluster

city_latlong = {
    'Agra': [27.1767, 78.0081], 'Ahmedabad': [23.0225, 72.5714], 'Durgapur': [23.5204, 87.3119],
    'Aurangabad': [19.8762, 75.3433], 'Bengaluru': [12.9716, 77.5946], 'Bhopal': [23.2599, 77.4126],
    'Coimbatore': [11.0168, 76.9558], 'Delhi': [28.7041, 77.1025], 'Dhanbad': [23.7957, 86.4304],
    'Faridabad': [28.4089, 77.3178], 'Ghaziabad': [28.6692, 77.4538], 'Gwalior': [26.2183, 78.1828],
    'Hyderabad': [17.3850, 78.4867], 'Indore': [22.7196, 75.8577], 'Jaipur': [26.9124, 75.7873],
    'Jabalpur': [23.1815, 79.9864], 'Jamshedpur': [22.8046, 86.2029], 'Jodhpur': [26.2389, 73.0243],
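The coordinate dictionary is truncated and the plotting code is not shown in the preview. A sketch of how such a dict is typically rendered with folium's MarkerCluster; the map centre, zoom level, and output file name are assumptions:

import folium
from folium.plugins import MarkerCluster

city_latlong = {'Agra': [27.1767, 78.0081], 'Delhi': [28.7041, 77.1025], 'Bengaluru': [12.9716, 77.5946]}

# Centre the map on India and cluster nearby city markers together.
india_map = folium.Map(location=[20.5937, 78.9629], zoom_start=5)
cluster = MarkerCluster().add_to(india_map)
for city, latlong in city_latlong.items():
    folium.Marker(location=latlong, popup=city).add_to(cluster)
india_map.save('cities.html')  # open the HTML file in a browser to view the map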
#!/usr/bin/env python
from flask import Flask, jsonify, request
import numpy as np
import pickle
app = Flask(__name__)
model_filepath = 'best_model.pickle'
def load_model():
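The preview ends at load_model(). A minimal sketch of how such a Flask service is usually completed, serving predictions from the pickled model; the /predict route and the expected JSON shape are assumptions:

#!/usr/bin/env python
from flask import Flask, jsonify, request
import numpy as np
import pickle

app = Flask(__name__)
model_filepath = 'best_model.pickle'

def load_model():
    with open(model_filepath, 'rb') as fp:
        return pickle.load(fp)

model = load_model()

@app.route('/predict', methods=['POST'])
def predict():
    # Expects JSON like {"features": [[...], [...]]}; the shape is an assumption.
    features = np.array(request.get_json()['features'])
    prediction = model.predict(features)
    return jsonify({'prediction': prediction.tolist()})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)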