View post_process.py
# encoding=utf8
# Python 2 module setup: force UTF-8 as the process-wide default string
# encoding so implicit str<->unicode coercions don't raise UnicodeDecodeError.
# NOTE(review): the reload(sys)/setdefaultencoding trick is a well-known
# Python 2-only hack; it does not exist in Python 3.
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import re
# Matches a string made entirely of digit runs, each optionally followed by a
# single comma or period (e.g. "1,234" or "3.14"); anchored to the full string.
number_match_re = re.compile(r'^([0-9]+[,.]?)+$')
# Splits a number on commas/periods; the capturing group makes re.split keep
# the separators in the result list.
number_split_re = re.compile(r'([,.])')
View failed_logins
1 .+?
1 [^
2 0000
2 010101
2 1111
2 1234
2 12345
2 666666
2 adm
2 anna
View cupy-pytorch-ptx.py
import torch
from cupy.cuda import function
from pynvrtc.compiler import Program
from collections import namedtuple
# Input: a random 1x4x4 tensor moved to the GPU; `b` is a zero-filled output
# buffer of the same shape, to be written by the CUDA kernel defined below.
a = torch.randn(1,4,4).cuda()
b = torch.zeros(a.size()).cuda()
kernel = '''
extern "C"
View count_wikitext.py
# Build the vocabulary of the WikiText train split: the set of unique
# space-separated tokens, printing the first few lines as a sanity check.
vocab = set()
# `with` ensures the file handle is closed (the original leaked it).
with open('wiki.train.tokens') as f:
    for i, line in enumerate(f):
        # Split on single spaces; drop the empty strings produced by
        # leading/trailing/double spaces.
        words = [x for x in line.split(' ') if x]
        # Bulk-add via set.update — the original used a side-effect list
        # comprehension ([vocab.add(w) for w in words]) that built and
        # discarded a list of Nones.
        vocab.update(words)
        if i < 10:
            print(words)
print('Vocab size:', len(vocab))
View cartpole.py
''' Script for Cartpole using policy gradient via Chainer, two layer MLP, dropout, and rejection sampling of historical memories '''
import gym
import numpy as np
import chainer
from chainer import optimizers
from chainer import ChainList, Variable
import chainer.functions as F
View buggy_cartpole.py
""" Quick script for Cartpole using policy gradient via Chainer, two layer MLP, dropout, and vaguely rejection sampling of historical memories """
import gym
import numpy as np
import chainer
from chainer import optimizers
from chainer import ChainList, Variable
import chainer.functions as F
View time_dist.py
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # fix the RNG so results are reproducible across runs
import sys
from keras.utils.test_utils import get_test_data
from keras.models import Sequential
from keras.layers.core import Dense, TimeDistributedDense
from keras.layers.recurrent import GRU
View babi_rnn.py
from __future__ import absolute_import
from __future__ import print_function
from functools import reduce
import re
import tarfile
import numpy as np
np.random.seed(1337) # for reproducibility
# As such, I agree strongly with you that this won't make a good test dataset for testing various RNN architectures.
from keras.callbacks import EarlyStopping
View fetch_page.py
import gzip
import json
import requests
# Python 2 StringIO import: prefer the fast C implementation, fall back to
# the pure-Python module on interpreters that lack it.
try:
    from cStringIO import StringIO
except ImportError:
    # Was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; only an ImportError can occur here.
    from StringIO import StringIO
# Let's fetch the Common Crawl FAQ using the CC index
resp = requests.get('http://index.commoncrawl.org/CC-MAIN-2015-27-index?url=http%3A%2F%2Fcommoncrawl.org%2Ffaqs%2F&output=json')
View uniq_tasks_10k.txt
Unique samples in tasks_1-20_v1-2/en-10k/qa10_indefinite-knowledge_{}.txt
Train length: 9989
Test length: 1000
Intersection: 0
Unique samples in tasks_1-20_v1-2/en-10k/qa11_basic-coreference_{}.txt
Train length: 9827
Test length: 997
Intersection: 25
Unique samples in tasks_1-20_v1-2/en-10k/qa12_conjunction_{}.txt
Train length: 9991