Skip to content

Instantly share code, notes, and snippets.

@pishangujeniya
Forked from dgrahn/keras_metrics.py
Last active February 3, 2020 09:33
Show Gist options
  • Save pishangujeniya/ca8dd46a5d5cf0b0391b712c1a03b9b6 to your computer and use it in GitHub Desktop.
F1 Score Metrics removed from Keras in 2.0. Get True Positive, False Positive, True Negative, False Negative, Precision, Recall, Accuracy
"""Keras 1.0 metrics.
This file contains the precision, recall, and f1_score metrics which were
removed from Keras by commit: a56b1a55182acf061b1eb2e2c86b48193a0e88f7
"""
from keras import backend as K
def precision(y_true, y_pred):
    """Batch-wise precision metric (Keras 1.x behavior).

    Computes the fraction of predicted positives that are actually
    positive, averaged over the current batch only. Intended for
    multi-label classification: "how many selected items are relevant".
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon guards against division by zero when nothing is predicted positive
    return tp / (predicted_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall metric (Keras 1.x behavior).

    Computes the fraction of actual positives that were predicted
    positive, averaged over the current batch only. Intended for
    multi-label classification: "how many relevant items are selected".
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon guards against division by zero when there are no positives
    return tp / (actual_pos + K.epsilon())
def f1_score(y_true, y_pred):
    """Batch-wise F1 score: harmonic mean of precision and recall.

    Only a batch-wise average, like the `precision` and `recall`
    metrics it is built from.
    """
    prec = precision(y_true, y_pred)
    rec = recall(y_true, y_pred)
    # epsilon guards against division by zero when both metrics are 0
    return 2 * prec * rec / (prec + rec + K.epsilon())
# Usage example:
# model.compile(loss='categorical_crossentropy',
#               optimizer='adam',
#               metrics=['accuracy', f1_score])
def tpfptnfn(cls, y_true, y_predicted):
    """Count confusion-matrix cells for binary (0/1) labels.

    Args:
        cls: Unused; kept so the original classmethod-style signature
            stays backward-compatible (callers may pass anything).
        y_true: Sequence of ground-truth labels, each 0 or 1.
        y_predicted: Sequence of predicted labels, each 0 or 1.
            Expected to be the same length as ``y_true``.

    Returns:
        Tuple ``(tp, fp, tn, fn)`` of integer counts.
    """
    tp = fp = tn = fn = 0
    # zip pairs labels directly instead of indexing via range(len(...));
    # the original would raise IndexError if y_true were shorter.
    for truth, pred in zip(y_true, y_predicted):
        if pred == 1:
            if truth == 1:
                tp += 1
            else:
                fp += 1
        else:
            if truth == 0:
                tn += 1
            else:
                fn += 1
    return tp, fp, tn, fn
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment