@piercelamb
Created December 20, 2022 18:01
get_statistics
# Fragment of get_statistics (indentation reconstructed): the first block runs
# inside an implied per-example, per-model loop; the separator and comparison
# tally run once per example; the final calls run after all examples.
        predicted_label_id = str(result[0])
        if config.is_comparison:
            # remember this model's prediction so the models can be compared per example
            predictions[model_name] = id2label[predicted_label_id]
        print(f"{model_name} Predicted Label: {id2label[predicted_label_id]}")

        # stream this example into the model's running F1 and accuracy metrics
        f1_metric = model_data['metrics']['f1']
        acc_metric = model_data['metrics']['acc']
        f1_metric.add_batch(
            predictions=[int(predicted_label_id)],
            references=[int(actual_label_id)]
        )
        acc_metric.add_batch(
            predictions=[int(predicted_label_id)],
            references=[int(actual_label_id)]
        )

        # update the model's per-class (multi-class) counts
        model_data['stats'] = get_multi_class_stats(
            model_data['stats'],
            predicted_label_id,
            actual_label_id,
            id2label
        )

    print("--------------------------------------------\n")
    if config.is_comparison:
        # tally how the models' predictions compare on this example
        add_counts_to_comparison(comparison_stats, predictions, ground_truth_label)

# after the example loop: finalize the cross-model comparison, then per-model stats
if config.is_comparison:
    calc_most_common_fps(comparison_stats)
    upload_comparison(comparison_stats, config)
process_statistics(model_containers, config)
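
The add_batch(predictions=..., references=...) calls match the Hugging Face evaluate API (formerly datasets.load_metric). Assuming that is what populates model_data['metrics'], a minimal sketch of how the metrics might be created and later finalized (e.g. inside process_statistics); the dict layout here is an assumption, not shown in the gist:

import evaluate

# Assumption: per-model container holding streaming metrics and stats
model_data = {
    'metrics': {
        'f1': evaluate.load("f1"),
        'acc': evaluate.load("accuracy"),
    },
    'stats': {},
}

# ... examples are fed in one at a time via add_batch, as above ...

# compute() drains the accumulated batches and returns a dict
f1_result = model_data['metrics']['f1'].compute(average="macro")  # {'f1': <float>}
acc_result = model_data['metrics']['acc'].compute()               # {'accuracy': <float>}

Streaming with add_batch keeps memory flat: only the metric's internal state is retained, rather than a list of every prediction.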
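
The body of get_multi_class_stats is not shown in this gist. Purely as an illustration of the per-class tally such a helper might maintain, here is a hypothetical version (the name suffix marks it as a sketch; the real implementation may differ):

def get_multi_class_stats_sketch(stats, predicted_label_id, actual_label_id, id2label):
    """Hypothetical per-class tally of true/false positives and false
    negatives, keyed by label name. Illustrative only."""
    predicted = id2label[predicted_label_id]
    actual = id2label[actual_label_id]
    for label in (predicted, actual):
        stats.setdefault(label, {'tp': 0, 'fp': 0, 'fn': 0})
    if predicted == actual:
        stats[actual]['tp'] += 1
    else:
        stats[predicted]['fp'] += 1  # model wrongly predicted this class
        stats[actual]['fn'] += 1     # model missed the true class
    return stats

Counts in this shape would be enough to derive per-class precision and recall later, which fits the calc_most_common_fps (most common false positives) call above.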