@celik-muhammed · Last active December 24, 2023
Functions to Calculate Custom Metrics for Keras and TensorFlow
# Source: https://gist.github.com/arnaldog12/5f2728f229a8bd3b4673b72786913252
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

# Regression
def r_square(y_true, y_pred):
    # Coefficient of determination: R^2 = 1 - SS_res / SS_tot
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())

def r_square_multi(y_true, y_pred):
    # Average R^2 across output columns (multi-output regression)
    n = K.int_shape(y_true)[1]
    rsquared = 0
    for i in range(n):
        rsquared += r_square(y_true[:, i], y_pred[:, i])
    return rsquared / n
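
# Quick sanity check for the regression metrics (assumes eager execution;
# the sample arrays are illustrative, not part of the original gist)
y_true_reg = np.array([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]])
y_pred_reg = np.array([[1.1, 2.1], [1.9, 3.8], [3.2, 6.3]])
print(r_square(y_true_reg, y_pred_reg).numpy())        # close to 1 for a good fit
print(r_square_multi(y_true_reg, y_pred_reg).numpy())  # mean R^2 over both columns
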
# Classification
def precision(y_true, y_pred):
    # Precision: proportion of correctly predicted positive instances
    # out of all instances predicted as positive
    # True positives (TP): y_true * y_pred
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=0)
    # Total number of positive predictions (true and false positives, TP + FP)
    predicted_as_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)
    # Precision = TP / (TP + FP)
    precision = true_positives / (predicted_as_positives + K.epsilon())
    return K.mean(precision)

def recall(y_true, y_pred):
    # Recall: proportion of correctly predicted positive instances
    # out of all actual positive instances; also known as the
    # true positive rate (TPR) or sensitivity
    # True positives (TP): y_true * y_pred
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=0)
    # Total number of actual positive instances (TP + FN)
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)
    # Recall = TP / (TP + FN)
    recall = true_positives / (actual_positives + K.epsilon())
    return K.mean(recall)

def Fbeta(y_true, y_pred, beta=1):
    # F-beta balances the precision-recall trade-off:
    # beta > 1 weights recall higher, beta < 1 weights precision higher
    p, r = precision(y_true, y_pred), recall(y_true, y_pred)
    # F1 = 2 * (precision * recall) / (precision + recall)
    # Fb = (1 + beta^2) * (precision * recall) / (beta^2 * precision + recall)
    numerator = (1 + beta**2) * (p * r)
    denominator = beta**2 * p + r + K.epsilon()
    return numerator / denominator

# Example usage
y_true = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [1.0]])
y_pred = np.array([[0.1], [0.1], [0.3], [0.3], [0.9], [0.9]])
print(precision(y_true, y_pred).numpy())
print(recall(y_true, y_pred).numpy())
print(Fbeta(y_true, y_pred).numpy())

def f1_score(y_true, y_pred):
    # F1 is the harmonic mean of precision and recall
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    f1 = 2 * ((p * r) / (p + r + K.epsilon()))
    return f1

def fbeta(y_true, y_pred, beta=2):
    # Per-sample F-beta (sums over the class axis), averaged over the batch
    y_pred = K.clip(y_pred, 0, 1)
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=1)
    fp = K.sum(K.round(K.clip(y_pred - y_true, 0, 1)), axis=1)
    fn = K.sum(K.round(K.clip(y_true - y_pred, 0, 1)), axis=1)
    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())
    num = (1 + beta**2) * (p * r)
    den = beta**2 * p + r + K.epsilon()
    return K.mean(num / den)
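
# Illustrative multi-label check (the arrays are assumptions, not from the
# original gist); fbeta sums per sample (axis=1), so inputs must be 2-D
y_true_ml = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
y_pred_ml = np.array([[0.9, 0.2, 0.8], [0.1, 0.7, 0.4]])
print(f1_score(y_true_ml, y_pred_ml).numpy())
print(fbeta(y_true_ml, y_pred_ml, beta=2).numpy())
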
def specificity(y_true, y_pred):
    # Specificity (true negative rate): TN / (TN + FP)
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
    return tn / (tn + fp + K.epsilon())

def negative_predictive_value(y_true, y_pred):
    # NPV: TN / (TN + FN), the share of negative predictions that are correct
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
    return tn / (tn + fn + K.epsilon())

def matthews_correlation_coefficient(y_true, y_pred):
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
    fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
    num = tp * tn - fp * fn
    den = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    return num / K.sqrt(den + K.epsilon())
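
# Optional cross-check against scikit-learn on hard 0/1 labels
# (assumes scikit-learn is installed; the labels are made up for illustration)
from sklearn.metrics import matthews_corrcoef
y_true_bin = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
y_pred_bin = np.array([0.0, 1.0, 0.0, 0.0, 1.0])
print(matthews_correlation_coefficient(y_true_bin, y_pred_bin).numpy())
print(matthews_corrcoef(y_true_bin.astype(int), y_pred_bin.astype(int)))  # should agree up to epsilon
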
def equal_error_rate(y_true, y_pred):
    # Sweep a decision threshold t upward until the false positive rate
    # drops below the false negative rate; the EER is where the curves cross
    n_imp = tf.math.count_nonzero(tf.equal(y_true, 0), dtype=tf.float32) + tf.constant(K.epsilon())
    n_gen = tf.math.count_nonzero(tf.equal(y_true, 1), dtype=tf.float32) + tf.constant(K.epsilon())
    scores_imp = tf.boolean_mask(y_pred, tf.equal(y_true, 0))
    scores_gen = tf.boolean_mask(y_pred, tf.equal(y_true, 1))
    loop_vars = (tf.constant(0.0), tf.constant(1.0), tf.constant(0.0))
    cond = lambda t, fpr, fnr: tf.greater_equal(fpr, fnr)
    body = lambda t, fpr, fnr: (
        t + 0.001,
        tf.divide(tf.math.count_nonzero(tf.greater_equal(scores_imp, t), dtype=tf.float32), n_imp),
        tf.divide(tf.math.count_nonzero(tf.less(scores_gen, t), dtype=tf.float32), n_gen)
    )
    t, fpr, fnr = tf.while_loop(cond, body, loop_vars, back_prop=False)
    eer = (fpr + fnr) / 2
    return eer
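
# Illustrative EER check on a small verification-style score set
# (assumes eager execution; the scores are made up for demonstration)
y_true_eer = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0], dtype=np.float32)
y_pred_eer = np.array([0.1, 0.4, 0.6, 0.5, 0.8, 0.9], dtype=np.float32)
print(equal_error_rate(y_true_eer, y_pred_eer).numpy())
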
def loss(y_true, y_pred):
    # Categorical cross-entropy written out by hand
    # Scale predictions so that the class probabilities of each sample sum to 1
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # Clip to prevent NaNs and Infs
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # Compute the loss
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    loss = y_true * K.log(y_pred)
    return -K.sum(loss, -1)

def loss_weighted(y_true, y_pred):
    # Class-weighted categorical cross-entropy
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    # Scale predictions so that the class probabilities of each sample sum to 1
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # Clip to prevent NaNs and Infs
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # Compute the loss
    loss = y_true * K.log(y_pred)
    # One weight per class; np.ones(5) assumes 5 equally weighted classes,
    # replace with your own per-class weights
    weights = np.ones(5)
    return -K.sum(loss * weights, -1)
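
# Minimal sketch of wiring these into a Keras model; the architecture and
# shapes below are assumptions for illustration, not part of the original gist
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(10,)),
    tf.keras.layers.Dense(5, activation="softmax"),  # 5 classes, matching loss_weighted
])
model.compile(optimizer="adam",
              loss=loss_weighted,  # or the unweighted `loss` above
              metrics=[precision, recall, f1_score])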