Utility script to evaluate an object detection (OD) model on a custom dataset (CPPE-5).
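The script targets a late-2022 transformers release, where DetrFeatureExtractor and its pad_and_create_pixel_mask method still exist alongside DetrImageProcessor. Assumed dependencies (hedged; the gist pins nothing): transformers, datasets, evaluate, torch, torchvision, pycocotools (needed by torchvision.datasets.CocoDetection), tqdm, Pillow.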
import os
import json
import evaluate
import torch
import torchvision
import numpy as np
from tqdm import tqdm
from PIL import Image
from transformers import DetrFeatureExtractor, DetrForObjectDetection, DetrImageProcessor
from datasets import load_dataset
cppe5 = load_dataset("cppe-5")
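# cppe5 is a DatasetDict with "train" and "test" splits; each example has
# "image_id", "image", "width", "height", and "objects" (parallel lists:
# "id", "area", "bbox" in COCO xywh format, "category"). Uncomment to verify:
# print(cppe5)
# print(cppe5["test"][0])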
class CocoDetection(torchvision.datasets.CocoDetection):
    def __init__(self, img_folder, feature_extractor, ann_file):
        super(CocoDetection, self).__init__(img_folder, ann_file)
        self.feature_extractor = feature_extractor

    def __getitem__(self, idx):
        # read in PIL image and target in COCO format
        img, target = super(CocoDetection, self).__getitem__(idx)
        # preprocess image and target (converting target to DETR format, resizing + normalization of both image and target)
        image_id = self.ids[idx]
        target = {"image_id": image_id, "annotations": target}
        encoding = self.feature_extractor(images=img, annotations=target, return_tensors="pt")
        pixel_values = encoding["pixel_values"].squeeze()  # remove batch dimension
        target = encoding["labels"][0]  # remove batch dimension
        return pixel_values, target
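# A sketch of one item (hypothetical instance; shapes depend on the image):
# ds = CocoDetection(img_folder, feature_extractor, ann_file)
# pixel_values, target = ds[0]
# pixel_values: FloatTensor of shape (3, H, W), resized + normalized
# target: DETR-format dict with keys like "class_labels", "boxes"
#         (normalized cx, cy, w, h), "area", "iscrowd", "image_id",
#         "orig_size", "size"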
feature_extractor = DetrFeatureExtractor.from_pretrained("MariaK/detr-resnet-50_fine_tuned_cppe5")  # used for preprocessing + batch padding
im_processor = DetrImageProcessor.from_pretrained("MariaK/detr-resnet-50_fine_tuned_cppe5")  # used for post-processing predictions
model = DetrForObjectDetection.from_pretrained("MariaK/detr-resnet-50_fine_tuned_cppe5")
def collate_fn(batch):
    pixel_values = [item[0] for item in batch]
    encoding = feature_extractor.pad_and_create_pixel_mask(pixel_values, return_tensors="pt")
    labels = [item[1] for item in batch]
    batch = {}
    batch["pixel_values"] = encoding["pixel_values"]
    batch["pixel_mask"] = encoding["pixel_mask"]
    batch["labels"] = labels
    return batch
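# Padding happens per batch because images in a batch can differ in size after
# resizing; pixel_mask marks real pixels (1) vs. padding (0). A hypothetical
# check, once the CocoDetection dataset (`dummy`) is built below:
# sample = collate_fn([dummy[0], dummy[1]])
# sample["pixel_values"].shape  # (2, 3, max_H, max_W) across the two images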
# prepare the test dataset
def val_formatted_anns(image_id, objects):
    annotations = []
    for i in range(len(objects["id"])):
        new_ann = {
            "id": objects["id"][i],
            "category_id": objects["category"][i],
            "iscrowd": 0,
            "image_id": image_id,
            "area": objects["area"][i],
            "bbox": objects["bbox"][i],
        }
        annotations.append(new_ann)
    return annotations
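# Worked example with made-up values:
# objects = {"id": [7], "category": [4], "area": [1200.0], "bbox": [[10.0, 20.0, 30.0, 40.0]]}
# val_formatted_anns(3, objects)
# -> [{"id": 7, "category_id": 4, "iscrowd": 0, "image_id": 3,
#      "area": 1200.0, "bbox": [10.0, 20.0, 30.0, 40.0]}]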
def save_cppe5_annotation_file_images(cppe5):
    output_json = {}
    path_output_cppe5 = f"{os.getcwd()}/cppe5/"
    if not os.path.exists(path_output_cppe5):
        os.makedirs(path_output_cppe5)
    path_anno = os.path.join(path_output_cppe5, "cppe5_ann.json")
    # Map each label id to its category name, following the COCO format:
    # 0: coverall, 1: face_shield, 2: gloves, 3: goggles, 4: mask
    categories_json = [
        {"supercategory": "none", "id": 0, "name": "coverall"},
        {"supercategory": "none", "id": 1, "name": "face_shield"},
        {"supercategory": "none", "id": 2, "name": "gloves"},
        {"supercategory": "none", "id": 3, "name": "goggles"},
        {"supercategory": "none", "id": 4, "name": "mask"},
    ]
    output_json["images"] = []
    output_json["annotations"] = []
    for example in cppe5:
        ann = val_formatted_anns(example["image_id"], example["objects"])
        output_json["images"].append(
            {
                "id": example["image_id"],
                "width": example["image"].width,
                "height": example["image"].height,
                "file_name": f"{example['image_id']}.png",
            }
        )
        output_json["annotations"].extend(ann)
    output_json["categories"] = categories_json
    with open(path_anno, "w") as file:
        json.dump(output_json, file, ensure_ascii=False, indent=4)
    for im, img_id in zip(cppe5["image"], cppe5["image_id"]):
        path_img = os.path.join(path_output_cppe5, f"{img_id}.png")
        im.save(path_img)
    return path_output_cppe5, path_anno
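# Resulting on-disk layout (under ./cppe5/): one "<image_id>.png" per image,
# plus "cppe5_ann.json" with "images" / "annotations" / "categories" in
# standard COCO format, i.e. exactly what torchvision's CocoDetection expects.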
path_output_cppe5, path_anno = save_cppe5_annotation_file_images(cppe5["test"])
dummy = CocoDetection(path_output_cppe5, feature_extractor, path_anno)
module = evaluate.load("ybelkada/cocoevaluate", coco=dummy.coco)  # COCO evaluator seeded with the ground-truth annotations
val_dataloader = torch.utils.data.DataLoader(dummy, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn)
model.eval()  # make sure dropout / batchnorm run in inference mode
with torch.no_grad():
    for idx, batch in enumerate(tqdm(val_dataloader)):
        # get the inputs (everything stays on CPU here; add .to(device) calls to run on GPU)
        pixel_values = batch["pixel_values"]
        pixel_mask = batch["pixel_mask"]
        labels = [{k: v for k, v in t.items()} for t in batch["labels"]]  # these are in DETR format, resized + normalized

        # forward pass
        outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask)

        orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0)
        results = im_processor.post_process(outputs, orig_target_sizes)  # convert model outputs to the COCO API format (scores, labels, boxes)
        module.add(prediction=results, reference=labels)
        del batch

results = module.compute()
print(results)
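# `results` should contain COCO-style detection metrics (AP/AR across IoU
# thresholds and object sizes); the exact key names depend on the
# ybelkada/cocoevaluate module.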