Skip to content

Instantly share code, notes, and snippets.

@salman2learn
Last active August 26, 2021 22:40
Show Gist options
  • Save salman2learn/1527449182b394e4fe122e06ff41c2ad to your computer and use it in GitHub Desktop.
Counts vehicles in an image.
# pip3 install opencv-python==4.2.0.32
# wget https://pjreddie.com/media/files/yolov3.weights
# wget https://raw.githubusercontent.com/guptavasu1213/Yolo-Vehicle-Counter/master/yolo-coco/coco.names
# wget https://raw.githubusercontent.com/guptavasu1213/Yolo-Vehicle-Counter/master/yolo-coco/yolov3.cfg
# Finally, put this code in the same folder as above 3 files.
# python vehicle-counter.py
# References:
# Original source: https://github.com/guptavasu1213/Yolo-Vehicle-Counter
# Coco dataset: https://cocodataset.org/#home
# NMS: https://towardsdatascience.com/non-maximum-suppression-nms-93ce178e177c
import numpy as np
import cv2
# Detection thresholds: minimum confidence to keep a raw detection, and the
# overlap threshold used later by non-maximum suppression (NMS).
preDefinedConfidence = 0.5
preDefinedThreshold = 0.3

# Shared accumulator lists (kept for backward compatibility with any external
# code that inspects them; getVehicleCount() does not require them).
boxes, confidences, classIDs = [], [], []

# Load the pre-trained YOLOv3 network; the .cfg and .weights files must be in
# the working directory (see the wget commands in the header comments).
net = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')

# Resolve the names of the YOLO output layers. getUnconnectedOutLayers()
# returns a 2-D array ([[i], ...]) on older OpenCV releases and a flat 1-D
# array on newer ones; flattening handles both shapes, so the script is no
# longer pinned to opencv-python==4.2.0.32.
ln = net.getLayerNames()
ln = [ln[int(i) - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

# All 80 category labels of the COCO dataset, one per line in coco.names.
# Use a context manager so the file handle is closed deterministically.
with open('coco.names') as labels_file:
    LABELS = labels_file.read().strip().split("\n")

# Categories we count as "vehicles".
list_of_vehicles = ["bicycle", "car", "motorbike", "bus", "truck", "train"]
def getVehicleCount(imgpath):
    """Count vehicles of interest in the image at ``imgpath``.

    Runs YOLOv3 on the image, applies non-maximum suppression, and counts the
    surviving detections whose class label is in ``list_of_vehicles``.

    Parameters:
        imgpath: path to the image file to analyze.

    Returns:
        dict mapping ``imgpath`` to the number of vehicles found.

    Raises:
        ValueError: if the image cannot be read from ``imgpath``.
    """
    img = cv2.imread(imgpath)
    if img is None:
        # cv2.imread silently returns None on a missing/unreadable file;
        # fail loudly instead of crashing later on img.shape.
        raise ValueError("could not read image: {}".format(imgpath))

    inputHeight, inputWidth, channels = img.shape
    # The YOLO input size must be a multiple of 32.
    inputHeight = inputHeight - (inputHeight % 32)
    inputWidth = inputWidth - (inputWidth % 32)

    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (inputWidth, inputHeight), swapRB=True, crop=False)
    net.setInput(blob)
    detections = net.forward(ln)

    # Fresh per-call lists. The original appended into module-level lists,
    # which made detections accumulate across successive calls and inflated
    # every count after the first image.
    boxes, confidences, classIDs = [], [], []
    for detected in detections:
        for detection in detected:
            # Class scores start at index 5 (after 4 box coords + objectness).
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence > preDefinedConfidence:
                # YOLO returns center (x, y) plus width/height, normalized to
                # [0, 1]; scale back to pixel coordinates of the input blob.
                box = detection[0:4] * np.array([inputWidth, inputHeight, inputWidth, inputHeight])
                (centerX, centerY, width, height) = box.astype("int")
                # Derive the top-left corner from the box center.
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    idxs = cv2.dnn.NMSBoxes(boxes, confidences, preDefinedConfidence, preDefinedThreshold)
    # NMSBoxes returns an empty tuple when nothing survives; the original
    # crashed on tuple.flatten() in that case. np.asarray also normalizes the
    # 2-D ([[i], ...]) vs 1-D return shapes across OpenCV versions.
    if len(idxs) == 0:
        return {imgpath: 0}
    vehiclelist = [LABELS[classIDs[i]] for i in np.asarray(idxs).flatten()
                   if LABELS[classIDs[i]] in list_of_vehicles]
    return {imgpath: len(vehiclelist)}
if __name__ == "__main__":
    # Guarded entry point: the module can now be imported without immediately
    # running inference. Replace the path with your own traffic image.
    print(getVehicleCount("your-traffic-file.png"))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment