Skip to content

Instantly share code, notes, and snippets.

@weiaicunzai
Created June 22, 2018 13:20
Show Gist options
  • Save weiaicunzai/2a5ae6eac6712c70bde0630f3e76b77b to your computer and use it in GitHub Desktop.
Save weiaicunzai/2a5ae6eac6712c70bde0630f3e76b77b to your computer and use it in GitHub Desktop.
compute top-1, top-5 accuracy (precision@k) using PyTorch
from __future__ import print_function, absolute_import
__all__ = ['accuracy']
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch_size, n_classes) tensor of class scores/logits.
        target: (batch_size,) tensor of integer class labels.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors, one per k, each the percentage of samples
        whose true label is among the model's top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the maxk highest scores per sample: (batch_size, maxk).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()  # (maxk, batch_size)
    # Broadcast the labels against every rank of the prediction.
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUG FIX: `correct` is a slice of a transposed (non-contiguous)
        # tensor, so .view(-1) raises RuntimeError on modern PyTorch;
        # .reshape(-1) copies only when needed and always works.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
@Tikquuss
Copy link

Tikquuss commented Mar 27, 2021

Thank you for your implementation.

Since I also wanted the F1 score, the intersection over union (IoU), and the predicted labels, I did this.

import torch
from torch import tensor
from sklearn.metrics import f1_score, accuracy_score, jaccard_score

def custom_rand(shape : tuple, a = 0, b = 1., random_seed = 0, requires_grad = False) :
    """Generate a reproducible random tensor of shape `shape` filled with numbers in range (a, b).

    Args:
        shape: shape of the tensor to generate.
        a: lower bound of the uniform range.
        b: upper bound of the uniform range.
        random_seed: seed passed to torch.manual_seed for reproducibility.
        requires_grad: whether the returned tensor tracks gradients.
    """
    torch.manual_seed(random_seed)
    #torch.backends.cudnn.deterministic = True
    #torch.backends.cudnn.benchmark = False
    # BUG FIX: the original returned `(b - a) * rand + b`, which lies in
    # (b, 2*b - a) rather than the documented (a, b); the affine shift of a
    # uniform sample in [0, 1) onto [a, b) must be `+ a`.
    return (b - a) * torch.rand(shape).requires_grad_(requires_grad) + a

def top_k(logits, y, k : int = 1):
    """Top-k accuracy, weighted F1, weighted IoU (Jaccard), and predicted labels.

    logits : (bs, n_labels)
    y : (bs,)
    k : a sample counts as correct when its true label is among the k
        highest-scoring predictions.

    Returns:
        (acc, f1, iou, y_pred) with the three scores in percent. Where the
        top-k set contains the true label, y_pred keeps the true label;
        otherwise it falls back to the top-1 prediction.
    """
    labels_dim = 1
    assert 1 <= k <= logits.size(labels_dim)
    k_labels = torch.topk(input = logits, k = k, dim=labels_dim, largest=True, sorted=True)[1]

    # True if `expected label` is in k_labels, False if not: the product of
    # |y - k_labels| over the k candidates is zero iff one of them matches.
    a = ~torch.prod(input = torch.abs(y.unsqueeze(labels_dim) - k_labels), dim=labels_dim).to(torch.bool)

    # Keep the true label where the top-k set hit, else the top-1 prediction.
    # (The original also carried an equivalent dead `if False:` loop version,
    # removed here.)
    a = a.to(torch.int8)
    y_pred = a * y + (1-a) * k_labels[:,0]

    # BUG FIX: sklearn metrics take (y_true, y_pred) in that order; the
    # original passed (y_pred, y) to f1_score and accuracy_score.
    # accuracy_score is symmetric so that call was harmless, but weighted F1
    # is not — its per-class weights come from y_true's support.
    f1 = f1_score(y, y_pred, average='weighted')*100
    acc = accuracy_score(y, y_pred)*100

    iou = jaccard_score(y, y_pred, average="weighted")*100

    return acc, f1, iou, y_pred

if __name__ == '__main__':
    # Demo: reproducible fake logits/labels, then top-k metrics for every k.
    bs, n_labels = 10, 6
    random_seed = 0

    logits = custom_rand((bs, n_labels), random_seed=random_seed)
    # With seed 0 this prints a fixed (10, 6) tensor of values in (1, 2),
    # e.g. first row tensor([1.4963, 1.7682, 1.0885, 1.1320, 1.3074, 1.6341]).

    torch.manual_seed(random_seed)
    y = torch.randint(low=0, high=n_labels, size=(bs,))
    # With seed 0: tensor([2, 3, 5, 0, 1, 3, 1, 1, 1, 3])

    max_k = 6
    for k in range(1, max_k + 1):
        k_acc, k_f1, k_iou, y_pred = top_k(logits=logits.detach().cpu(), y=y, k=k)
        print(k, k_acc, k_f1, k_iou, y_pred)
    # Accuracy/F1/IoU grow with k and reach 100 at k == n_labels, since every
    # label is then inside the top-k set and y_pred collapses onto y.

@slmatrix
Copy link

slmatrix commented Sep 5, 2023

The code below requires one invocation for each different value of K.
IMO, removing the loop and the tuple makes the code clearer:

@torch.no_grad()
def accuracy(result, answer, topk=1):
    r'''
    Percentage of samples whose true label appears in the top-k predictions.

    result (batch_size, class_cnt)
    answer (batch_size)
    '''
    # Batch size, captured before any reshaping of `answer`.
    batch = answer.size(0)

    # Keep only the indices of the k best scores: (batch, class_cnt) -> (batch, topk),
    # then transpose so each row holds one rank's predictions: (topk, batch).
    preds = result.topk(topk)[1].t()

    # Tile the labels to the same (topk, batch) shape for elementwise compare.
    truth = answer.view(1, -1).expand_as(preds)

    # Count hits across all ranks and convert to a percentage of the batch.
    hits = (preds == truth).flatten().float().sum()
    return hits.mul_(100 / batch).item()

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment