load_mat.py

import scipy.io as scio
from typing import Any, Dict
import numpy as np

def load_matfile(filename: str) -> Dict:
    def parse_mat(element: Any):
        # object arrays (usually 1D cell arrays) are unpacked recursively
        if isinstance(element, np.ndarray) and element.dtype == np.object_ and len(element.shape) > 0:
            return [parse_mat(entry) for entry in element]
        # scalars and plain numeric arrays are returned as-is
        return element

    mat = scio.loadmat(filename)
    # skip MATLAB metadata keys such as '__header__' and '__version__'
    return {key: parse_mat(mat[key]) for key in mat if not key.startswith('__')}
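A minimal self-contained check of load_matfile; the file name 'demo.mat' and the key 'values' are illustrative, not part of the original snippet.

import numpy as np
import scipy.io as scio

scio.savemat('demo.mat', {'values': np.arange(5.0)})  # write a tiny .mat file first
data = load_matfile('demo.mat')
print(data['values'])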
pokemon_jupyter.py

# first, install the dependencies (pip install requests matplotlib pillow)
from urllib.request import urlopen
from PIL import Image
import matplotlib.pyplot as plt
import requests

api_url_pokemon = 'https://pokeapi.co/api/v2/pokemon/pikachu'
result = requests.get(api_url_pokemon)
if result.status_code == 200:
    pokemon_data = result.json()
    # fetch the default front sprite and show it inline
    sprite_url = pokemon_data['sprites']['front_default']
    sprite = Image.open(urlopen(sprite_url))
    plt.imshow(sprite)
    plt.axis('off')
    plt.show()
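The same JSON payload carries more than sprites; for example, the ability names can be read out directly (field layout as documented by the public PokeAPI):

print([entry['ability']['name'] for entry in pokemon_data['abilities']])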
statistic-list.py

from scipy.stats import describe
import numpy as np

def print_stats(arr_values: np.ndarray) -> None:
    stats = describe(arr_values)
    print(f'min: {stats.minmax[0]:.5f}, max: {stats.minmax[1]:.5f}')
    print(f'mean: {stats.mean:.5f}')
    # ddof=1 matches the sample variance that describe() reports
    print(f'standard deviation: {np.std(arr_values, ddof=1):.5f}')
    print(f'variance: {stats.variance:.5f}')
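A quick way to exercise print_stats on synthetic data (sample size and distribution are arbitrary choices):

import numpy as np

values = np.random.default_rng(42).normal(loc=0.0, scale=1.0, size=1000)
print_stats(values)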
cleaning-csharp.bat

@echo off
REM Remove files generated by the compiler in this directory and all subdirectories.
REM Essential release files are kept.
echo Removing "*.csproj.user" files...
for /f "delims=" %%i in ('dir /b /on /s "%~dp0*.csproj.user"') do del /f /q "%%i"
echo.
echo Removing "*.exe.config" files...
for /f "delims=" %%i in ('dir /b /on /s "%~dp0*.exe.config"') do del /f /q "%%i"
plot-ROC.py

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve

def plot_roc(name: str, labels: np.ndarray, predictions: np.ndarray, **kwargs) -> None:
    fp, tp, _ = roc_curve(labels, predictions)
    auc_roc = roc_auc_score(labels, predictions)
    plt.plot(100 * fp, 100 * tp, label=f'{name} ({auc_roc:.3f})',
             linewidth=2, **kwargs)
    plt.xlabel('False positives [%]')
    plt.ylabel('True positives [%]')
    plt.title('ROC curve')
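A self-contained sketch of a call to plot_roc; the synthetic labels and scores exist only to produce a plausible curve:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=500)
scores = np.clip(0.6 * labels + 0.5 * rng.random(500), 0.0, 1.0)
plot_roc('toy model', labels, scores)
plt.legend()
plt.show()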
precision-example.py

from tensorflow.keras.metrics import Precision

# y_train / y_train_pred: training labels and the model's binary predictions
precision = Precision()
precision.update_state(y_train, y_train_pred)
print(precision.result().numpy())
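To make the snippet runnable on its own, here are illustrative stand-ins for y_train and y_train_pred:

import numpy as np
from tensorflow.keras.metrics import Precision

y_train = np.array([0, 1, 1, 0, 1])
y_train_pred = np.array([0, 1, 0, 0, 1])
precision = Precision()
precision.update_state(y_train, y_train_pred)
print(precision.result().numpy())  # 1.0: both positive predictions are correct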
plot-confusion-matrix.py

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns

# notice the threshold p applied to the raw prediction scores
def plot_cm(labels: np.ndarray, predictions: np.ndarray, p: float = 0.5) -> None:
    cm = confusion_matrix(labels, predictions > p)
    # you can normalize the matrix with confusion_matrix(..., normalize='true')
    plt.figure(figsize=(5, 5))
    sns.heatmap(cm, annot=True, fmt='d')
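Again a self-contained usage sketch with synthetic scores, leaving the threshold at its 0.5 default:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
labels = rng.integers(0, 2, size=200)
scores = np.clip(0.5 * labels + 0.5 * rng.random(200), 0.0, 1.0)
plot_cm(labels, scores)
plt.show()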
evaluate-model.py

# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
score_test = model.evaluate(test_ds.batch(batch_size))
for name, value in zip(model.metrics_names, score_test):
    print(f'{name}: {value}')
plot-metrics.py

import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import History

# `colors` is the palette defined in plot-loss.py below
def plot_metrics(history: History) -> None:
    metrics = ['loss', 'precision', 'recall', 'auc', 'tp', 'sensitivity']
    for n, metric in enumerate(metrics):
        name = metric.replace('_', ' ').capitalize()
        plt.subplot(3, 2, n + 1)  # adjust the grid to the number of metrics
        plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train')
        plt.plot(history.epoch, history.history['val_' + metric],
                 color=colors[0], linestyle='--', label='Val')
        plt.xlabel('Epoch')
        plt.ylabel(name)
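plot_metrics only finds these keys if the model was compiled with matching metric names. The sketch below is one way to satisfy that assumption; the tiny model and synthetic data are made up for the example, and it expects plot_metrics plus the colors palette from plot-loss.py to already be in scope.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

x = np.random.default_rng(0).normal(size=(256, 4)).astype('float32')
y = (x.sum(axis=1) > 0).astype('float32')

model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=[tf.keras.metrics.Precision(name='precision'),
                       tf.keras.metrics.Recall(name='recall'),
                       tf.keras.metrics.AUC(name='auc'),
                       tf.keras.metrics.TruePositives(name='tp'),
                       tf.keras.metrics.SensitivityAtSpecificity(0.5, name='sensitivity')])
history = model.fit(x, y, validation_split=0.25, epochs=5, verbose=0)
plot_metrics(history)
plt.tight_layout()
plt.show()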
plot-loss.py

import matplotlib.pyplot as plt
from matplotlib import rcParams
from tensorflow.keras.callbacks import History

rcParams['figure.figsize'] = (12, 10)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']

def plot_log_loss(history: History, title_label: str, n: int) -> None:
    # Use a log scale to show the wide range of values.
    plt.semilogy(history.epoch, history.history['loss'],
                 color=colors[n], label='Train ' + title_label)
    # validation curve, dashed, mirroring the train/val style of plot-metrics.py
    plt.semilogy(history.epoch, history.history['val_loss'],
                 color=colors[n], linestyle='--', label='Val ' + title_label)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
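Hypothetical call, reusing a History object such as the one produced by model.fit in the plot-metrics.py sketch above:

plot_log_loss(history, 'baseline', 0)
plt.show()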