Farhood FeryET

@FeryET
FeryET / README.md
Created October 31, 2022 05:19 — forked from mahmoud-eskandari/README.md
Install v2ray on Bridge (Ubuntu 18+ via systemd) - Upstream (Ubuntu 18+/CentOS 7+ via Docker)

To get started, create a directory on the upstream (foreign) server and change into it:

mkdir vmess
cd vmess
@FeryET
FeryET / download_rcrypto_comments.py
Created August 17, 2021 07:56
Download /r/CryptoCurrency Comments
import os
import shutil
import gzip
from pmaw import PushshiftAPI
import datetime as dt
from tqdm import tqdm
import pandas as pd
from pathlib import Path
# Creating data folder
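The preview of the script ends here. Below is a minimal sketch of how the pmaw client is typically used to pull /r/CryptoCurrency comments into a DataFrame; the date range, limit, output path and parameter names are illustrative assumptions, not the values used in the original script.

api = PushshiftAPI()
# Sketch only: dates, limit and output path are assumptions.
after = int(dt.datetime(2021, 7, 1).timestamp())
before = int(dt.datetime(2021, 8, 1).timestamp())
comments = api.search_comments(
    subreddit="CryptoCurrency", after=after, before=before, limit=100_000
)
df = pd.DataFrame([c for c in comments])
Path("data").mkdir(exist_ok=True)
df.to_csv("data/cryptocurrency_comments.csv.gz", index=False, compression="gzip")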
@FeryET
FeryET / pneumonia_performance.txt
Created August 14, 2021 09:11
PneumoniaNet Performance on Test Data
              precision    recall  f1-score   support

      Normal       0.98      0.69      0.81       234
   Pneumonia       0.84      0.99      0.91       390

    accuracy                           0.88       624
   macro avg       0.91      0.84      0.86       624
weighted avg       0.89      0.88      0.87       624
test_loss: 0.4041681243823125
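The layout above matches scikit-learn's classification_report. A minimal sketch of how such a report is typically produced from test-set predictions (y_test and y_pred are assumed to come from the evaluation loop, which is not shown):

from sklearn.metrics import classification_report

print(classification_report(y_test, y_pred, target_names=["Normal", "Pneumonia"]))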
@FeryET
FeryET / pneumonia_summary.txt
Last active August 14, 2021 08:47
pneumonia_net_summary
==========================================================================================
Layer (type:depth-idx)                   Output Shape              Param #
==========================================================================================
├─Sequential: 1-1                        [-1, 576, 8, 8]           --
|    └─ConvBNActivation: 2-1             [-1, 16, 128, 128]        (464)
|    └─InvertedResidual: 2-2             [-1, 16, 64, 64]          (744)
|    └─InvertedResidual: 2-3             [-1, 24, 32, 32]          (3,864)
|    └─InvertedResidual: 2-4             [-1, 24, 32, 32]          (5,416)
|    └─InvertedResidual: 2-5             [-1, 40, 16, 16]          (13,736)
|    └─InvertedResidual: 2-6             [-1, 40, 16, 16]          (57,264)
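This table is in the format emitted by the torch-summary/torchinfo package. A minimal sketch of how a comparable summary can be produced for the MobileNetV3-Small backbone used in the pneumonia.py gist below; the 256x256 RGB input size is inferred from the 128x128 output of the first layer, and the exact package version used originally may differ.

import torchvision
from torchinfo import summary

# Sketch only: the input size is inferred, not confirmed by the gist.
backbone = torchvision.models.mobilenet_v3_small(pretrained=True).features
summary(backbone, input_size=(1, 3, 256, 256))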
@FeryET
FeryET / pneumonia.py
Created August 14, 2021 08:10
PneumoniaNet
import torch.nn as nn
import torchvision


def load_pretrained():
    # MobileNetV3-Small backbone pretrained on ImageNet; only the
    # convolutional feature extractor is kept.
    pretrained_model = torchvision.models.mobilenetv3.mobilenet_v3_small(
        pretrained=True, progress=True
    )
    return pretrained_model.features


class PneumoniaNet(nn.Module):
    def __init__(self,
                 input_dim,
                 finetune=False):
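The gist preview cuts off at the constructor signature. A hypothetical sketch of how the pretrained feature extractor might be wired to a two-class head follows; the pooling, head size and freezing logic are assumptions, not the author's actual definition. It reuses load_pretrained and the nn import from above, and the 576-channel figure from the summary gist.

class PneumoniaHeadSketch(nn.Module):
    """Hypothetical wrapper: MobileNetV3 features plus a small linear head."""

    def __init__(self, finetune=False):
        super().__init__()
        self.features = load_pretrained()   # 576-channel feature maps (see summary above)
        for p in self.features.parameters():
            p.requires_grad = finetune      # freeze the backbone unless fine-tuning
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.head = nn.Linear(576, 2)       # Normal vs Pneumonia

    def forward(self, x):
        x = self.pool(self.features(x)).flatten(1)
        return self.head(x)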
from sklearn.decomposition import PCA
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# TomotopyLDAVectorizer is defined elsewhere in this project; workers, min_df,
# rm_top, docs and y_true are assumed to be set earlier in the script.
folds = RepeatedStratifiedKFold(n_splits=10, n_repeats=10)
vectorizer = TomotopyLDAVectorizer(num_of_topics=15, workers=workers, min_df=min_df,
                                   rm_top=rm_top)
clf = SVC()
pca = PCA(n_components=0.95)
pipe = Pipeline([("vectorizer", vectorizer), ("scalar", StandardScaler()),
                 ("classifier", clf)])
results = cross_val_score(pipe, docs, y_true, cv=folds, n_jobs=2, verbose=1)
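A typical follow-up, not part of the gist, is to summarise the repeated-CV scores:

# Assumption: report mean and spread over the 10x10 repeated folds.
print("accuracy: {:.3f} +/- {:.3f}".format(results.mean(), results.std()))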
from matplotlib import cm


def plot_topic_clusters(ax, x2d, y, labels):
    # Scatter the 2D-projected documents, one color per topic/class label.
    ax.set_aspect("equal")
    colors = cm.get_cmap("Spectral", len(labels))
    for i, l in enumerate(labels):
        c = colors(i / len(labels))
        ax.scatter(x2d[y == i, 0], x2d[y == i, 1], color=c, label=l, alpha=0.7)
    ax.grid()
    ax.legend()
    ax.set(adjustable='box', aspect='equal')
    return ax
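A minimal usage sketch follows; the t-SNE projection, figure size and label names are assumptions (the original may use a different 2D reduction):

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# Sketch only: assumes x_train (document-topic matrix) and integer labels y exist.
x2d = TSNE(n_components=2, init="pca").fit_transform(x_train)
fig, ax = plt.subplots(figsize=(6, 6))
plot_topic_clusters(ax, x2d, y, labels=["class 0", "class 1"])
plt.show()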
vectorizer = TomotopyLDAVectorizer(num_of_topics=num_of_topics,
                                   workers=workers, min_df=min_df,
                                   rm_top=rm_top)
x_train = vectorizer.fit_transform(docs_train)
x_test = vectorizer.transform(docs_test)
from tomotopy import HDPModel

# Hierarchical Dirichlet Process model: the number of topics is inferred from the data.
hdp_model = HDPModel(min_df=min_df, rm_top=rm_top)
hdp_model.optim_interval = 5
for d in docs_train:
    hdp_model.add_doc(d)
hdp_model.burn_in = 100
hdp_model.train(0, workers=workers)
for i in range(0, 1000, 10):
    hdp_model.train(10, workers=workers)
    print('Iteration: {}\tLog-likelihood: {}\tNum. of topics: {}'.format(
        i, hdp_model.ll_per_word, hdp_model.live_k))
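After training, the topics the HDP kept alive can be inspected; a small sketch using tomotopy's HDPModel accessors (top_n and the print format are assumptions):

# Sketch only: print the top words of each live topic.
for k in range(hdp_model.k):
    if not hdp_model.is_live_topic(k):
        continue
    words = [w for w, _ in hdp_model.get_topic_words(k, top_n=10)]
    print("topic {}: {}".format(k, ", ".join(words)))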
# SpacyCleaner is a spaCy-based text preprocessing helper defined elsewhere in this project.
processor = SpacyCleaner(chunksize=1000, workers=workers)
docs = processor.transform(raw_docs)