@PieterjanCriel
Created September 12, 2022 08:03
YoloDetector
# Assumes the Ultralytics YOLOv5 repository is on the Python path; in older
# YOLOv5 releases LoadImages lives in utils.datasets instead of utils.dataloaders.
import torch

from models.common import DetectMultiBackend
from utils.dataloaders import LoadImages
from utils.general import check_img_size, non_max_suppression, scale_coords, xyxy2xywh
from utils.torch_utils import select_device


class YoloDetector:
    CONFIDENCE_VALUE = 0.50  # minimum probability to keep a detection (50%)
    THRESHOLD_VALUE = 0.25   # IoU threshold for non-maximum suppression (the YOLOv5 default is 0.45)

    def __init__(self, model_path, model_version=None, img_size=1000):
        self.model_path = model_path
        self.img_size = img_size
        self.conf_thres = YoloDetector.CONFIDENCE_VALUE
        self.iou_thres = YoloDetector.THRESHOLD_VALUE
        self.device = select_device('cpu')
        self.prepare_model()

    def prepare_model(self):
        weights = self.model_path
        self.model = DetectMultiBackend(weights, device=self.device, dnn=False)
        self.imgsz = check_img_size(self.img_size, s=self.model.stride)  # verify img_size is a multiple of the model stride
        self.model.model.float()
        self.label_names = self.model.names

    def evaluate(self, imgfilepath, original_path=None):
        dataset = LoadImages(imgfilepath, img_size=self.imgsz, stride=self.model.stride, auto=self.model.pt and not self.model.jit)
        objects = self._evaluate(dataset)
        json_output = {"algorithm": self.model_path, "objects": objects}
        return json_output

    def _evaluate(self, dataset):
        results = []
        for path, im, im0s, vid_cap, s in dataset:
            im = torch.from_numpy(im).to(self.device)
            im = im.float()  # uint8 to fp32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # add batch dimension
            # Inference
            pred = self.model(im, augment=False, visualize=False)
            # NMS
            pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, None, False, max_det=1000)
            # Process predictions
            for det in pred:  # per image
                gn = torch.tensor(im0s.shape)[[1, 0, 1, 0]]  # original image width/height, used to normalise boxes
                if len(det):
                    # Rescale boxes from the padded inference size back to the original image size
                    det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0s.shape).round()
                    for *xyxy, conf, cls in reversed(det):
                        label = self.label_names[int(cls)]
                        # Convert corner coordinates (xyxy) to normalised center x/y, width, height
                        x, y, w, h = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
                        results.append({
                            "label": label,
                            "confidence_score": int(100 * float(conf)),
                            "coords": {
                                "center_x": x,
                                "center_y": y,
                                "width": w,
                                "height": h
                            }
                        })
        return results
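
A minimal usage sketch of the class above. The weights file best.pt and the image cat.jpg are placeholder names for illustration, not part of the gist; any YOLOv5 .pt checkpoint and image path would do.

# Hypothetical paths; substitute your own weights and image.
detector = YoloDetector(model_path="best.pt", img_size=1000)
output = detector.evaluate("cat.jpg")
for obj in output["objects"]:
    print(obj["label"], obj["confidence_score"], obj["coords"])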