Janvijay Singh (iamjanvijay) - GitHub Gists
#include <string>
#include <vector>
using namespace std;

// LeetCode "Generate Parentheses": build all balanced strings of length 2*half_len.
class Solution
{
    // cur_len = characters still to place, cur_sum = currently unmatched '('.
    void generateParenthesisUtil(vector<string> &ans, string &temp, int &half_len, int cur_len, int cur_sum)
    {
        if(cur_len==0)
        {
            if(cur_sum==0)          // fully matched: record the string
                ans.push_back(temp);
            return;
        }
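        // The gist preview cuts off here; a plausible continuation
        // (assumption): cur_sum < cur_len guarantees every '(' can still be
        // closed, and cur_sum > 0 allows a ')'.
        if(cur_sum < cur_len)
        {
            temp.push_back('(');
            generateParenthesisUtil(ans, temp, half_len, cur_len-1, cur_sum+1);
            temp.pop_back();
        }
        if(cur_sum > 0)
        {
            temp.push_back(')');
            generateParenthesisUtil(ans, temp, half_len, cur_len-1, cur_sum-1);
            temp.pop_back();
        }
    }

public:
    vector<string> generateParenthesis(int n)
    {
        vector<string> ans;
        string temp;
        generateParenthesisUtil(ans, temp, n, 2*n, 0);
        return ans;
    }
};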
iamjanvijay / npy.py - Last active June 25, 2019 07:43
testing
import numpy as np

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

# Saved tensors from an LSTM run: fused weight kernel, previous hidden
# state m_prev, previous cell state c_prev, and the expected output.
kernel = np.load('weights.npy')
m_prev = np.load('h.npy')
c_prev = np.load('c.npy')
out = np.load('out.npy')
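A hedged sketch of the check this gist appears to set up: recompute one LSTM step from the saved tensors and compare against out. The fused [i, j, f, o] gate layout, the input x, and the bias follow TensorFlow's LSTMCell convention and are assumptions, not read from the files.

def lstm_step(x, m_prev, c_prev, kernel, bias, forget_bias=1.0):
    # Pre-activation gate values from the fused kernel: [x, m_prev] @ kernel.
    z = np.concatenate([x, m_prev], axis=1) @ kernel + bias
    i, j, f, o = np.split(z, 4, axis=1)   # input, cell, forget, output gates
    c = sigmoid(f + forget_bias) * c_prev + sigmoid(i) * np.tanh(j)
    m = sigmoid(o) * np.tanh(c)
    return m, c

# m, c = lstm_step(x, m_prev, c_prev, kernel, bias)
# np.testing.assert_allclose(m, out, atol=1e-5)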
// Class for forward propagation of LSTM with zoneout in CUDA.
#include <stdio.h>
#include <cublas_v2.h>
#include <curand.h>

// Error-checking macro for CUDA runtime calls.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
    if (stat != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
    }
}
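Given the cuBLAS and cuRAND includes, the same pattern is usually extended to their status types; a minimal sketch (printing the raw status code, since string helpers vary across toolkit versions):

#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
    if (stat != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
    }
}

#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
    if (stat != CURAND_STATUS_SUCCESS) {
        fprintf(stderr, "cuRAND Error: %d %s %d\n", stat, file, line);
    }
}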
from torch import nn as nn
import torch
import numpy as np

# Single-channel 2D convolution setup.
in_channels = 1
out_channels = 1
kernel_size = (3, 3)
input_feature_size = (1, 1, 2, 2) # BATCH, CHANNEL, INPUT_X, INPUT_Y

def flip_kernel(kernel):
    # The preview cuts off at the definition; a plausible body (assumption):
    # flip both spatial axes, turning the cross-correlation that nn.Conv2d
    # computes into a true convolution.
    return torch.flip(kernel, dims=(-2, -1))
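A quick way to exercise flip_kernel (hypothetical usage; padding=2 is full padding so the 3x3 kernel fits the 2x2 input):

from torch.nn import functional as F

x = torch.randn(input_feature_size)
w = torch.randn(out_channels, in_channels, *kernel_size)
# Cross-correlating with the flipped kernel is equivalent to a true
# convolution with w.
y = F.conv2d(x, flip_kernel(w), padding=2)
print(y.shape)  # torch.Size([1, 1, 4, 4])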
from collections import OrderedDict
from torch import nn as nn
from torch.nn import functional as F
import torch
import numpy as np

# Asymmetric convolution setup: a 4x9 kernel with (7, 4) padding.
in_channels = 1
out_channels = 1
kernel_size = (4, 9)
padding_size = (7, 4)
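A hedged illustration of what these settings do to the output shape (the input size here is made up): the width is preserved (2*4 - 9 + 1 = 0 net change) while the height grows by 2*7 - 4 + 1 = 11 rows.

x = torch.randn(1, in_channels, 16, 16)
w = torch.randn(out_channels, in_channels, *kernel_size)
y = F.conv2d(x, w, padding=padding_size)
print(y.shape)  # torch.Size([1, 1, 27, 16])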
#include <string>
#include <vector>
using namespace std;

// LeetCode "Letter Combinations of a Phone Number".
class Solution {
    // Keypad letters for digits '2'..'9'.
    string dig_to_char[8] = {"abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"};

    void backtrack(string &digits, int cur, string &temp, vector<string> &ans)
    {
        if(temp.length()==digits.length())
        {
            ans.push_back(temp);   // one letter picked for every digit
            return;
        }
        for(int i=0; i < dig_to_char[digits[cur]-'2'].size(); i++)
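        {
            // Preview ends here; a plausible continuation (assumption):
            // try each letter for the current digit and recurse.
            temp.push_back(dig_to_char[digits[cur]-'2'][i]);
            backtrack(digits, cur+1, temp, ans);
            temp.pop_back();
        }
    }

public:
    vector<string> letterCombinations(string digits)
    {
        vector<string> ans;
        if(digits.empty()) return ans;
        string temp;
        backtrack(digits, 0, temp, ans);
        return ans;
    }
};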
from nltk.corpus import cmudict

d = cmudict.dict()  # CMU pronouncing dictionary (assumed source of `d`)
vowels = []         # List of vowels.
consonants = []     # List of consonants.

def nsyl(word, lang):
    # Average syllable count over the word's CMU-dict pronunciations;
    # vowel phonemes carry a stress digit, so count entries ending in one.
    if lang == 'EN':
        try:
            syllable_counts = [len(list(y for y in x if y[-1].isdigit())) for x in d[word.lower()]]
            # print("{} present in cmu_dict: {}".format(word, sum(syllable_counts) / float(len(syllable_counts))))
            return sum(syllable_counts) / float(len(syllable_counts))
        except KeyError:
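            # Preview ends at the handler; a plausible fallback (assumption):
            # approximate syllables by counting vowel groups in the spelling.
            import re
            return len(re.findall(r'[aeiouy]+', word.lower()))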
from bs4 import BeautifulSoup

# Parse a locally saved copy of a Quora questions-and-answers page.
filename = 'quora_ques_ans.html'
with open(filename, 'r') as f:
    html_doc = f.read()
soup = BeautifulSoup(html_doc, 'html.parser')
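From here the usual next step is to walk the parsed tree; a hedged illustration (the tag choice is an assumption, since Quora's markup changes often):

for link in soup.find_all('a'):
    text = link.get_text(strip=True)
    if text:
        print(text, '->', link.get('href'))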
#include <vector>
using namespace std;

// Hierholzer's algorithm: consume edges depth-first; the post-order
// push yields an Eulerian path in reverse.
void dfs(vector<int> &ans, vector<vector<int>> &graph, int cur_node)
{
    while(graph[cur_node].size())
    {
        int nxt_node = graph[cur_node][int(graph[cur_node].size())-1];
        graph[cur_node].pop_back();   // remove the edge before recursing
        dfs(ans, graph, nxt_node);
    }
    ans.push_back(cur_node);          // node is finished: all its edges are used
}
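A hypothetical driver showing the reversal step (the graph is made up; the directed edges 0->1, 1->2, 2->0 form an Euler circuit):

#include <algorithm>
#include <cstdio>

int main()
{
    vector<vector<int>> graph = {{1}, {2}, {0}};
    vector<int> ans;
    dfs(ans, graph, 0);
    reverse(ans.begin(), ans.end());    // post-order is the path reversed
    for (int v : ans) printf("%d ", v); // prints: 0 1 2 0
    return 0;
}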
iamjanvijay / preprocess.py - Created February 3, 2021 12:17
FinSBD2 - Tokeniser
import os
from nltk.tokenize import word_tokenize  # assumed tokeniser, matching the gist description

for f in fnames:
    # Read text from json and tokenise.
    fpath = os.path.join(data_folder, f)
    text, sentences, lists, items, items1, items2, items3, items4 = read_json(fpath)
    tokenised_text = word_tokenize(text)

    # Additional manual splits to clean the dataset.
    add_splits = {"Classe6": ["Classe", "6"], "Class.The": ["Class", ".", "The"], ".The": [".", "The"], ".Such": [".", "Such"], ".These": [".", "These"]}
    temp_tokenised_text = []
    for token in tokenised_text:
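        # Preview ends here; a plausible continuation (assumption): apply
        # the manual splits token by token.
        if token in add_splits:
            temp_tokenised_text.extend(add_splits[token])
        else:
            temp_tokenised_text.append(token)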