Prateek Joshi (prateekjoshi565)

cd_gn.py
import networkx as nx

def girvan_newman(graph):
    # find number of connected components
    sg = nx.connected_components(graph)
    sg_count = nx.number_connected_components(graph)
    # keep removing edges until the graph splits into more than one component
    while sg_count == 1:
        # remove the edge with the highest betweenness centrality
        u, v = edge_to_remove(graph)
        graph.remove_edge(u, v)
        sg = nx.connected_components(graph)
        sg_count = nx.number_connected_components(graph)
    return sg
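The edge_to_remove helper is referenced above but not shown in this gist. A minimal sketch of what it would look like, assuming it should return the edge with the highest edge betweenness centrality (the standard Girvan-Newman criterion):

# sketch of the edge_to_remove helper assumed by girvan_newman above
def edge_to_remove(graph):
    # dict mapping each edge to its betweenness centrality
    centrality = nx.edge_betweenness_centrality(graph)
    # return the edge with the maximum centrality score
    return max(centrality, key=centrality.get)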
obj_detect_all_frames_cntr.py
import cv2
import numpy as np

# kernel for image dilation
kernel = np.ones((4, 4), np.uint8)

# font style for annotating frames
font = cv2.FONT_HERSHEY_SIMPLEX

# directory to save the output frames
pathIn = "contour_frames_3/"

for i in range(len(col_images)-1):
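    # assumed continuation (the gist cuts off here): frame differencing
    # between consecutive frames in col_images, a list of BGR frames
    # assumed to have been loaded earlier in the tutorial
    grayA = cv2.cvtColor(col_images[i], cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(col_images[i + 1], cv2.COLOR_BGR2GRAY)
    diff_image = cv2.absdiff(grayB, grayA)

    # threshold the difference image and dilate to join nearby regions
    ret, thresh = cv2.threshold(diff_image, 30, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, kernel, iterations=1)

    # find contours of the moving regions
    # (OpenCV 4.x returns two values here; 3.x returns three)
    contours, hierarchy = cv2.findContours(dilated.copy(), cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)

    # draw the contours on the frame and write it to the output directory
    dmy = col_images[i].copy()
    cv2.drawContours(dmy, contours, -1, (127, 200, 0), 2)
    cv2.imwrite(pathIn + str(i) + ".png", dmy)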
kg_dep_parse_eg1.py
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp("The 22-year-old recently won ATP Challenger tournament.")

# print each token along with its dependency tag
for tok in doc:
    print(tok.text, "...", tok.dep_)
lp_read_data.py
# load node details
with open("fb-pages-food.nodes") as f:
    fb_nodes = f.read().splitlines()

# load edges (or links)
with open("fb-pages-food.edges") as f:
    fb_links = f.read().splitlines()

len(fb_nodes), len(fb_links)
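A natural next step (not shown in the gist) is to parse these raw lines into an edge list and build the graph. A sketch, assuming each line of the .edges file holds a comma-separated node pair:

import pandas as pd
import networkx as nx

# split each "node_1,node_2" line into its two endpoint IDs
node_list_1 = [link.split(',')[0] for link in fb_links]
node_list_2 = [link.split(',')[1] for link in fb_links]

fb_df = pd.DataFrame({'node_1': node_list_1, 'node_2': node_list_2})

# build an undirected graph from the edge list
G = nx.from_pandas_edgelist(fb_df, 'node_1', 'node_2', create_using=nx.Graph())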
kg_get_relation.py
def get_relation(sent):
    doc = nlp(sent)

    # Matcher class object
    matcher = Matcher(nlp.vocab)

    # define the pattern
    pattern = [{'DEP': 'ROOT'},
               {'DEP': 'prep', 'OP': "?"},
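               # assumed continuation (the gist cuts off here): the rest of
               # the pattern allows an optional agent and an optional
               # adjective after the ROOT verb
               {'DEP': 'agent', 'OP': "?"},
               {'POS': 'ADJ', 'OP': "?"}]

    # register the pattern and take the last (longest) match as the relation
    # (spaCy 3.x API; spaCy 2.x used matcher.add("matching_1", None, pattern))
    matcher.add("matching_1", [pattern])
    matches = matcher(doc)

    k = len(matches) - 1
    span = doc[matches[k][1]:matches[k][2]]
    return span.text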
nlg_text_gen.py
import numpy as np
import torch
import torch.nn.functional as F

# predict next token
def predict(net, tkn, h=None):
    # tensor inputs
    x = np.array([[token2int[tkn]]])
    inputs = torch.from_numpy(x)

    # push to GPU
    inputs = inputs.cuda()
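    # assumed continuation (the gist cuts off here): the usual next-token
    # sampling step for a word-level LSTM; int2token is assumed to be the
    # inverse mapping of token2int
    if h is None:
        # fresh hidden state (assumes the model defines init_hidden,
        # as sketched in nlg_model_arch.py below)
        h = net.init_hidden(1)

    # detach the hidden state so gradients don't flow back through it
    h = tuple([each.data for each in h])

    # forward pass through the network
    out, h = net(inputs, h)

    # turn logits into a probability distribution over the vocabulary
    p = F.softmax(out, dim=1).data.cpu().numpy().reshape(-1)

    # sample the next token from the three most likely candidates
    top_idx = p.argsort()[-3:]
    choice = int(np.random.choice(top_idx))

    return int2token[choice], h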
kg_import_libraries.py
import re
import pandas as pd
import bs4
import requests

import spacy
from spacy import displacy
from spacy.matcher import Matcher
from spacy.tokens import Span

nlp = spacy.load('en_core_web_sm')
kg_get_entities.py
def get_entities(sent):
    ## chunk 1
    ent1 = ""
    ent2 = ""

    prv_tok_dep = ""   # dependency tag of previous token in the sentence
    prv_tok_text = ""  # previous token in the sentence

    prefix = ""
    modifier = ""
nlg_model_arch.py
import torch.nn as nn

class WordLSTM(nn.Module):
    def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
        super().__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr
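        # assumed continuation (the gist cuts off here): embedding layer,
        # stacked LSTM, dropout, and a projection back to the vocabulary;
        # vocab_size is assumed to be defined elsewhere in the tutorial
        self.emb_layer = nn.Embedding(vocab_size, 200)

        # LSTM over the embedded tokens
        self.lstm = nn.LSTM(200, n_hidden, n_layers,
                            dropout=drop_prob, batch_first=True)

        self.dropout = nn.Dropout(drop_prob)

        # fully connected layer maps hidden states to vocabulary logits
        self.fc = nn.Linear(n_hidden, vocab_size)

    def forward(self, x, hidden):
        # embed input tokens, run the LSTM, and project to logits
        embedded = self.emb_layer(x)
        lstm_output, hidden = self.lstm(embedded, hidden)
        out = self.dropout(lstm_output)
        out = out.reshape(-1, self.n_hidden)
        return self.fc(out), hidden

    def init_hidden(self, batch_size):
        # fresh zero-filled hidden and cell states on the GPU
        weight = next(self.parameters()).data
        return (weight.new_zeros(self.n_layers, batch_size, self.n_hidden).cuda(),
                weight.new_zeros(self.n_layers, batch_size, self.n_hidden).cuda())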
nlg_train.py
import torch
import torch.nn as nn

def train(net, epochs=10, batch_size=32, lr=0.001, clip=1, print_every=32):
    # optimizer
    opt = torch.optim.Adam(net.parameters(), lr=lr)

    # loss function
    criterion = nn.CrossEntropyLoss()

    # push the model to the GPU
    net.cuda()
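    # assumed continuation (the gist cuts off here): a standard LSTM
    # training loop; get_batches and the integer-encoded data (x_int, y_int)
    # are assumed to be defined elsewhere in the tutorial
    counter = 0
    net.train()

    for e in range(epochs):
        # fresh hidden state at the start of each epoch
        h = net.init_hidden(batch_size)

        for x, y in get_batches(x_int, y_int, batch_size):
            counter += 1
            inputs = torch.from_numpy(x).cuda()
            targets = torch.from_numpy(y).cuda()

            # detach the hidden state so gradients don't flow across batches
            h = tuple([each.data for each in h])

            net.zero_grad()
            output, h = net(inputs, h)
            loss = criterion(output, targets.view(-1))
            loss.backward()

            # clip gradients to avoid exploding gradients in the LSTM
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            opt.step()

            if counter % print_every == 0:
                print("Epoch: {}/{}...".format(e + 1, epochs),
                      "Step: {}...".format(counter))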