Mohammad Mahdi Samiei (mmsamiei)
create and develop a framework for life ...
mmsamiei / shellcode_to_swap_mouse_button.cpp (last active September 3, 2018 13:06)
shellcode to swap mouse button
bits 32
jmp start
start:
push ebx
mov ecx, 0x30
mov eax, dword [fs: ecx]; EAX = Address Of PEB
mov eax, [eax + 0xc]; EAX = Address Of PEB->Ldr
mov esi, [eax + 0x14]; ESI = PEB->Ldr.InMemOrder
lodsd; EAX = Second module
xchg eax, esi; EAX = ESI, ESI = EAX
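For context only: the stub above walks the PEB loader list (fs:[0x30] -> PEB -> Ldr -> InMemoryOrder) to resolve module addresses by hand instead of relying on an import table, and the gist title suggests the call it is ultimately building up to is user32!SwapMouseButton. As a point of comparison, not part of the gist, the same effect from ordinary Python on a Windows host:

import ctypes

# ctypes.windll is only available on Windows; this loads user32.dll and resolves the export.
user32 = ctypes.windll.user32
user32.SwapMouseButton(True)   # True swaps the left/right mouse buttons; False restores the default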
mmsamiei / simple_cnn_mnist.ipynb (created May 10, 2019 03:14)
simple_cnn_mnist.ipynb (notebook preview not rendered)
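Since the notebook itself is not shown, here is a rough stand-in for what a simple CNN on MNIST usually looks like, as a minimal PyTorch sketch; the framework choice, layer sizes, and names are assumptions, not the notebook's actual contents.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleCNN(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)   # 1x28x28 -> 16x28x28
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)  # 16x14x14 -> 32x14x14
        self.fc = nn.Linear(32 * 7 * 7, num_classes)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # 28 -> 14
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # 14 -> 7
        return self.fc(x.flatten(1))

model = SimpleCNN()
logits = model(torch.randn(8, 1, 28, 28))  # a fake batch of 8 MNIST-sized images
print(logits.shape)                        # torch.Size([8, 10])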
mmsamiei / wizard_of_wikipedia_dataloader.ipynb (last active December 30, 2019 21:53)
wizard_of_wikipedia.ipynb (notebook preview not rendered)
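Since this notebook is not shown either, and the later snippets read test_random_split.json, here is a minimal loader sketch; the field names ('chosen_topic', 'dialog', 'speaker', 'text') are assumptions about the Wizard of Wikipedia JSON layout, not code taken from the gist.

import json

with open('test_random_split.json') as f:
    dialogues = json.load(f)          # the split is assumed to be a list of dialogue dicts

for dialogue in dialogues[:2]:
    print('topic:', dialogue.get('chosen_topic'))
    for turn in dialogue.get('dialog', []):
        print(' ', turn.get('speaker'), ':', str(turn.get('text', ''))[:60])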
!pip install stanfordnlp
import stanfordnlp

# Download the Persian models and build a tokenize-only pipeline.
stanfordnlp.download('fa')
nlp = stanfordnlp.Pipeline(processors="tokenize", lang="fa", models_dir=".")

# Sample Persian sentence (roughly: "to reach Samarkand, one must pass through countless trials").
text = "برای رفتن به سمرقند باید از خان‌های بی‌شمار گذشت"
doc = nlp(text)
for sent in doc.sentences:
    for wrd in sent.words:
        print(wrd.text)
from torchtext.data import BucketIterator, interleave_keys

batch_size = 32

def batch_size_fn(new, count, sofar):
    """Keep augmenting the batch and calculate the total number of tokens + padding."""
    # new:   the example about to be added
    # count: how many examples are in the batch so far (including `new`)
    # sofar: the effective batch size returned by the previous call
    # Once the returned value reaches batch_size (the token-level budget defined
    # globally above), the iterator yields the current minibatch and starts a new one.
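The function body is missing in the gist. A common implementation is the token-count pattern popularized by the Annotated Transformer, sketched below; the attribute names .src and .trg are assumptions about how the dataset's Fields are named, not something taken from this gist.

max_src_in_batch, max_tgt_in_batch = 0, 0

def batch_size_fn(new, count, sofar):
    """Return the effective batch size, in padded tokens, if `new` were added."""
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:                     # first example of a fresh batch: reset the running maxima
        max_src_in_batch, max_tgt_in_batch = 0, 0
    max_src_in_batch = max(max_src_in_batch, len(new.src))
    max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)   # +2 for <sos>/<eos>
    return max(count * max_src_in_batch, count * max_tgt_in_batch)

# The iterator wraps up a batch whenever this value would exceed batch_size:
# train_iter = BucketIterator(
#     train_data, batch_size=batch_size, batch_size_fn=batch_size_fn,
#     sort_key=lambda ex: interleave_keys(len(ex.src), len(ex.trg)))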
[
{"page": "Mathematics"},
{"page": "Mathematician"},
{"page": "Arithmetic"},
{"page": "Addition"},
{"page": "Subtraction"},
{"page": "Multiplication"},
{"page": "Division (mathematics)"},
{"page": "Euclidean algorithm"},
{"page": "Fraction (mathematics)"},
import json

# Convert the JSON array in test_random_split.json into JSON Lines (one record per line).
with open('test_random_split.json') as freader:
    data = json.load(freader)

with open('correct-sample.json', 'w') as outfile:
    for entry in data:
        json.dump(entry, outfile)
        outfile.write('\n')
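As a quick sanity check (a usage sketch, not part of the gist), the resulting JSON Lines file can be read back one record per line:

import json

with open('correct-sample.json') as f:
    records = [json.loads(line) for line in f if line.strip()]
print(len(records))   # should match len(data) above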
import json
from pprint import pprint
from tqdm import tqdm

# Load the test split and prepare to build a cleaned-up copy of it.
freader = open('test_random_split.json')
data = json.load(freader)
print(len(data))

new_dataset = []
import textblob

# Text features on a DataFrame df with a 'Text' column: sentiment polarity, character length, word count.
df['polarity'] = df['Text'].map(lambda text: textblob.TextBlob(text).sentiment.polarity)
df['review_len'] = df['Text'].astype(str).apply(len)
df['word_count'] = df['Text'].apply(lambda x: len(str(x).split()))
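Since the snippet assumes an existing DataFrame df, here is a self-contained usage sketch; the two sample reviews are invented purely to show the resulting columns.

import pandas as pd
from textblob import TextBlob

df = pd.DataFrame({"Text": ["I love this product, it works great!",
                            "Terrible quality, broke after a day."]})
df['polarity'] = df['Text'].map(lambda text: TextBlob(text).sentiment.polarity)
df['review_len'] = df['Text'].astype(str).apply(len)
df['word_count'] = df['Text'].apply(lambda x: len(str(x).split()))
print(df[['polarity', 'review_len', 'word_count']])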
! wget -q "https://drive.google.com/uc?export=download&id=1-3tnHTdDjtMd9O2LgKN2ir3t5KvnqrXI" -O dataset.zip
! unzip dataset.zip
import subprocess
import shlex

# Google Drive puts large files behind a download-confirmation page, so the first
# request is only used to obtain the warning cookie/token for this file.
file_id = "1xhiGDTihHYUbGES88sYt4S6nLDjKEji1"
file_name = "mscoco.zip"
url_get_cookie = f"https://drive.google.com/uc?export=download&id={file_id}"
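The gist is truncated here. The url_get_cookie variable suggests the usual two-step workaround for Drive's large-file confirmation page; a sketch of that continuation follows, reusing file_id, file_name, and url_get_cookie from above. The cookie filename, temporary page name, and confirm-token regex are my assumptions, not the gist's code.

import re

# Step 1: hit the URL once, saving Drive's session cookies and the confirmation page.
subprocess.run(shlex.split(
    f'wget -q --save-cookies cookies.txt --keep-session-cookies "{url_get_cookie}" -O confirm_page.html'))

# Step 2: extract the confirm token from the warning page (assumed to appear as "confirm=XXXX").
page = open('confirm_page.html', errors='ignore').read()
match = re.search(r'confirm=([0-9A-Za-z_-]+)', page)
confirm = match.group(1) if match else ''

# Step 3: download the real file using the saved cookies and the token.
url_download = f"https://drive.google.com/uc?export=download&confirm={confirm}&id={file_id}"
subprocess.run(shlex.split(
    f'wget -q --load-cookies cookies.txt "{url_download}" -O {file_name}'))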