saurav tripathi (isauravmanitripathi) — GitHub Gists
from tensorflow.keras.layers import Layer, Dropout
from positional_encoding import PositionEmbeddingFixedWeights

class Decoder(Layer):
    def __init__(self, vocab_size, sequence_length, h, d_k, d_v, d_model, d_ff, n, rate, **kwargs):
        super(Decoder, self).__init__(**kwargs)
        # Embed the target tokens and add fixed sinusoidal position encodings
        self.pos_encoding = PositionEmbeddingFixedWeights(sequence_length, vocab_size, d_model)
        self.dropout = Dropout(rate)
        # Stack of n identical decoder layers (DecoderLayer is defined below)
        self.decoder_layer = [DecoderLayer(h, d_k, d_v, d_model, d_ff, rate) for _ in range(n)]
        ...
    ...
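The elided lines presumably end with the decoder's forward pass. Given the attributes above and the DecoderLayer.call signature shown below, a plausible sketch (not the gist's actual code) is:

    def call(self, output_target, encoder_output, lookahead_mask, padding_mask, training):
        # Positional embedding plus dropout, then the stack of decoder layers
        x = self.dropout(self.pos_encoding(output_target), training=training)
        for layer in self.decoder_layer:
            x = layer(x, encoder_output, lookahead_mask, padding_mask, training)
        return x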
    def call(self, x, encoder_output, lookahead_mask, padding_mask, training):
        # Masked multi-head self-attention over the decoder input
        multihead_output1 = self.multihead_attention1(x, x, x, lookahead_mask)
        # Expected output shape = (batch_size, sequence_length, d_model)
        # Add in a dropout layer
        multihead_output1 = self.dropout1(multihead_output1, training=training)
        # Followed by an Add & Norm layer
        addnorm_output1 = self.add_norm1(x, multihead_output1)
        ...
from tensorflow.keras.layers import Layer, Dropout
from multihead_attention import MultiHeadAttention
from encoder import AddNormalization, FeedForward

class DecoderLayer(Layer):
    def __init__(self, h, d_k, d_v, d_model, d_ff, rate, **kwargs):
        super(DecoderLayer, self).__init__(**kwargs)
        # First block: masked self-attention over the decoder input
        self.multihead_attention1 = MultiHeadAttention(h, d_k, d_v, d_model)
        self.dropout1 = Dropout(rate)
        self.add_norm1 = AddNormalization()
        # Second block: encoder-decoder cross-attention
        self.multihead_attention2 = MultiHeadAttention(h, d_k, d_v, d_model)
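The preview cuts off after the second attention block is declared. Following the pattern of the first block and the AddNormalization/FeedForward imports, the constructor plausibly continues like this (a sketch; the attribute names and the FeedForward signature are assumptions):

        self.dropout2 = Dropout(rate)
        self.add_norm2 = AddNormalization()
        # Third block: position-wise feed-forward network
        self.feed_forward = FeedForward(d_ff, d_model)
        self.dropout3 = Dropout(rate)
        self.add_norm3 = AddNormalization()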
# Preprocessing
# Remove missing values
loans = loans.dropna()

# Remove outliers
q_low = loans["annual_inc"].quantile(0.08)
q_hi = loans["annual_inc"].quantile(0.92)
loans = loans[(loans["annual_inc"] < q_hi) & (loans["annual_inc"] > q_low)]
loans = loans[loans["dti"] <= 45]
q_hi = loans["bc_open_to_buy"].quantile(0.95)
loans = loans[loans["bc_open_to_buy"] < q_hi]
loans = loans[loans["bc_util"] <= 160]
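The same quantile-based trimming recurs for several columns; a small helper makes the pattern explicit (an illustrative sketch, not code from the gist — the function name and signature are assumptions):

import pandas as pd

def trim_quantiles(df: pd.DataFrame, col: str, low: float, high: float) -> pd.DataFrame:
    # Keep rows strictly inside the (low, high) quantile band of `col`
    q_low, q_hi = df[col].quantile(low), df[col].quantile(high)
    return df[(df[col] > q_low) & (df[col] < q_hi)]

# Usage: loans = trim_quantiles(loans, "annual_inc", 0.08, 0.92)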
# PROCESSING & DISPLAY
import numpy as np
import plotly.express as px
import streamlit as st

def display():
    with st.container():
        st.write("#### Which cities and states have recorded the most accidents?")
        # query_8 (a SPARQL query string) and graph are defined elsewhere in the app
        res = computeQuery(query_8, graph)
        # Treemap rooted at a single "U.S" node, nested by state then city
        fig = px.treemap(res, path=[px.Constant("U.S"), "state", "city"],
                         values="count", hover_data=["state", "city", "count"],
                         color="count",
                         color_continuous_scale="tealrose",
                         color_continuous_midpoint=np.average(res["count"], weights=res["count"]))
        st.plotly_chart(fig, use_container_width=True)
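computeQuery itself is not shown in the preview. Assuming graph is an rdflib.Graph and query_8 a SPARQL SELECT string (both assumptions), a minimal version compatible with the call above could look like:

import pandas as pd
from rdflib import Graph

def computeQuery(query: str, graph: Graph) -> pd.DataFrame:
    # Run the SPARQL query and flatten the result bindings into a DataFrame
    results = graph.query(query)
    return pd.DataFrame(results, columns=[str(var) for var in results.vars])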
# Build a lookup table of unique accident categories
acc_category = data[["category"]]
acc_category = acc_category.drop_duplicates()
acc_category = acc_category.reset_index(drop=True)
acc_category["id"] = acc_category.index

# Relate the tables through the category index
for i, _ in data.iterrows():
    cat = data.at[i, "category"]
    for j, _ in acc_category.iterrows():
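The nested iterrows scan (truncated in the preview) appears to attach each row's category id; assuming that intent, the same lookup can be done in one vectorized pass (a sketch, with category_id as an assumed column name):

# Map each category string to its id without the O(n*m) nested loop
data["category_id"] = data["category"].map(acc_category.set_index("category")["id"])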
# PRELIMINARY OPERATIONS
import pandas as pd

data = pd.read_excel("./data_original.xlsx")
data = data.drop(columns=["fix_port", "source", "age_youngest", "gender", "injury_desc",
                          "report", "notes", "mechanical", "op_error", "employee"])
data = data.dropna()
# Keep only amusement-park ride accidents from external manufacturers
data = data[data["bus_type"] == "Amusement park"]
data = data[data["industry_sector"] == "amusement ride"]
data = data[data["manufacturer"] != "In-house"]
nltk==3.7
pytrends==4.8.0
import re
from string import punctuation

import nltk
from nltk import TreebankWordTokenizer, sent_tokenize
from nltk.corpus import stopwords

class KeywordsGenerator:
    def __init__(self, pytrends):
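The preview stops at the constructor signature. A minimal body consistent with the imports above might store the pytrends client and prepare the NLTK resources (a sketch; every attribute name and downloaded resource below is an assumption):

class KeywordsGenerator:
    def __init__(self, pytrends):
        self.pytrends = pytrends  # a pytrends.request.TrendReq client, injected by the caller
        nltk.download("punkt", quiet=True)      # model required by sent_tokenize
        nltk.download("stopwords", quiet=True)
        self.stopwords = set(stopwords.words("english")) | set(punctuation)
        self.tokenizer = TreebankWordTokenizer()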
import java.awt.Color;
import java.awt.Cursor;
import java.awt.Font;
import java.util.regex.Pattern;
import javax.swing.*;

public class calculator {
    private static final int WINDOW_WIDTH = 410;