Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
# Code adapted from Tensorflow Object Detection Framework
# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
# Tensorflow Object Detection Detector
import numpy as np
import tensorflow as tf
import cv2
import time
class DetectorAPI:
    """Wrapper around a frozen TensorFlow 1.x object-detection graph.

    Loads the serialized GraphDef at *path_to_ckpt* into a private
    ``tf.Graph``, opens a ``tf.Session`` on it, and resolves the standard
    Object Detection API tensors (image input, boxes, scores, classes,
    detection count) by name so ``processFrame`` can run inference on
    single BGR/RGB frames.
    """

    def __init__(self, path_to_ckpt):
        """Load the frozen inference graph and prepare a session.

        Args:
            path_to_ckpt: Path to a ``frozen_inference_graph.pb`` exported
                by the TensorFlow Object Detection API.
        """
        self.path_to_ckpt = path_to_ckpt

        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            # NOTE(review): ``tf.GraphDef``/``tf.gfile``/``tf.Session`` are
            # TF 1.x APIs; under TF 2.x use the ``tf.compat.v1`` equivalents
            # (several commenters hit AttributeError here).
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        # Kept for backward compatibility with existing callers; this is a
        # context-manager object and is intentionally not entered here.
        self.default_graph = self.detection_graph.as_default()
        self.sess = tf.Session(graph=self.detection_graph)

        # Input and output tensors of the detection graph.
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box is a part of the image where an object was detected,
        # as normalized [ymin, xmin, ymax, xmax].
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score is the confidence for the corresponding detection.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    def processFrame(self, image):
        """Run detection on one frame.

        Args:
            image: HxWx3 uint8 image array.

        Returns:
            Tuple ``(boxes, scores, classes, num)`` where *boxes* is a list
            of ``(ymin, xmin, ymax, xmax)`` pixel tuples, *scores* a list of
            confidences, *classes* a list of int class ids (1 == person in
            COCO), and *num* the number of detections.
        """
        # The trained model expects a batch: shape [1, None, None, 3].
        image_np_expanded = np.expand_dims(image, axis=0)

        start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores,
             self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()
        print("Elapsed Time:", end_time - start_time)

        # Convert normalized box coordinates to pixel coordinates.
        im_height, im_width, _ = image.shape
        boxes_list = [(int(boxes[0, i, 0] * im_height),
                       int(boxes[0, i, 1] * im_width),
                       int(boxes[0, i, 2] * im_height),
                       int(boxes[0, i, 3] * im_width))
                      for i in range(boxes.shape[1])]

        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

    def close(self):
        """Release the TensorFlow session.

        Bug fix: the original also called ``self.default_graph.close()``,
        but the object returned by ``Graph.as_default()`` is a context
        manager with no ``close()`` method, so that line raised
        AttributeError. Only the session needs closing.
        """
        self.sess.close()
if __name__ == "__main__":
    model_path = '/path/to/faster_rcnn_inception_v2_coco_2017_11_08/frozen_inference_graph.pb'
    odapi = DetectorAPI(path_to_ckpt=model_path)
    threshold = 0.7
    cap = cv2.VideoCapture('/path/to/input/video')

    try:
        while True:
            r, img = cap.read()
            # Bug fix: the original never checked the read flag, so it
            # crashed inside cv2.resize when the stream ended.
            if not r:
                break
            img = cv2.resize(img, (1280, 720))

            boxes, scores, classes, num = odapi.processFrame(img)

            # Visualization: draw a box for every confident detection of
            # COCO class 1 (person).
            for i in range(len(boxes)):
                if classes[i] == 1 and scores[i] > threshold:
                    box = boxes[i]
                    # box is (ymin, xmin, ymax, xmax) in pixels.
                    cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)

            cv2.imshow("preview", img)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
    finally:
        # Bug fix: release resources so the window actually closes on 'q'
        # (the original script froze here — see the comment thread).
        odapi.close()
        cap.release()
        cv2.destroyAllWindows()
@chrisfauerbach

This comment has been minimized.

Copy link

commented Aug 21, 2018

Madhawa - I found your medium post tonight on 'people detection'. I'm building out a project, with code awfully similar. Looking at the code on line 76-80, your application is still 'finding' everything right? but only highlighting people? or am I missing something?

@jiapei100

This comment has been minimized.

Copy link

commented Sep 26, 2018

Where can I adjust batch_size ?
I got the following ERROR messages:

Limit:                  3022389248
InUse:                  1819312384
MaxInUse:               2296556288
NumAllocs:                    2107
MaxAllocSize:           1704329216

2018-09-26 01:26:45.888177: W tensorflow/core/common_runtime/bfc_allocator.cc:275] **************_____***********************************************__________________________________
2018-09-26 01:26:45.888213: W tensorflow/core/framework/op_kernel.cc:1273] OP_REQUIRES failed at transpose_op.cc:199 : Resource exhausted: OOM when allocating tensor with shape[300,4032,17,17] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
Traceback (most recent call last):
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1292, in _do_call
    return fn(*args)
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1277, in _run_fn
    options, feed_dict, fetch_list, target_list, run_metadata)
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1367, in _call_tf_sessionrun
    run_metadata)
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[300,4032,17,17] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
         [[{{node MaxPool2D/MaxPool-0-TransposeNHWCToNCHW-LayoutOptimizer}} = Transpose[T=DT_FLOAT, Tperm=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](CropAndResize, PermConstNHWCToNCHW-LayoutOptimizer)]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.

         [[{{node SecondStagePostprocessor/BatchMultiClassNonMaxSuppression/map/while/MultiClassNonMaxSuppression/ClipToWindow_69/Gather/Gather_2/_625}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_11273...r/Gather_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](^_cloopSecondStagePostprocessor/BatchMultiClassNonMaxSuppression/map/while/MultiClassNonMaxSuppression/SortByField/Assert/Assert/data_0/_31)]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.


During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "./tensorflow-human-detection.py", line 72, in <module>
    boxes, scores, classes, num = odapi.processFrame(img)
  File "./tensorflow-human-detection.py", line 43, in processFrame
    feed_dict={self.image_tensor: image_np_expanded})
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 887, in run
    run_metadata_ptr)
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1110, in _run
    feed_dict_tensor, options, run_metadata)
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1286, in _do_run
    run_metadata)
  File "~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1308, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[300,4032,17,17] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
         [[{{node MaxPool2D/MaxPool-0-TransposeNHWCToNCHW-LayoutOptimizer}} = Transpose[T=DT_FLOAT, Tperm=DT_INT32, _device="/job:localhost/replica:0/task:0/device:GPU:0"](CropAndResize, PermConstNHWCToNCHW-LayoutOptimizer)]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.

         [[{{node SecondStagePostprocessor/BatchMultiClassNonMaxSuppression/map/while/MultiClassNonMaxSuppression/ClipToWindow_69/Gather/Gather_2/_625}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_11273...r/Gather_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](^_cloopSecondStagePostprocessor/BatchMultiClassNonMaxSuppression/map/while/MultiClassNonMaxSuppression/SortByField/Assert/Assert/data_0/_31)]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.

@mzkaramat

This comment has been minimized.

Copy link

commented Nov 29, 2018

Thanks for the tutorial, it is very informative. Is there a better way to retrain the model only on the human class instead of getting all classes as output?

@KeitelDOG

This comment has been minimized.

Copy link

commented Mar 6, 2019

Very good code. Is there a way that I can filter classes to use only person and car? I know their class values are 1 for person and 3 for car, but how could I possibly make this happen here in line 33:

self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
@AnalyticsInAction

This comment has been minimized.

Copy link

commented Mar 23, 2019

Runs fine for me, but freezes when I try and quit out with "q". Issue is resolved when I append the following code to the end of the script to destroy the windows.

cap.release()
cv2.destroyAllWindows()

@vchacham

This comment has been minimized.

Copy link

commented Jun 5, 2019

How do I get rid of this error?
AttributeError: module 'tensorflow' has no attribute 'GraphDef'
I have added graph.proto file to the tensorflow/core/framework/graph.proto
But still getting the same error
Please help me.. Thank you


AttributeError Traceback (most recent call last)
in ()
54 if name == "main":
55 model_path = '/path/to/faster_rcnn_inception_v2_coco_2017_11_08/frozen_inference_graph.pb'
---> 56 odapi = DetectorAPI(path_to_ckpt=model_path)
57 threshold = 0.7
58 cap = cv2.VideoCapture('/path/to/input/video')

in init(self, path_to_ckpt)
7 self.detection_graph = tf.Graph()
8 with self.detection_graph.as_default():
----> 9 od_graph_def = tf.GraphDef()
10 with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
11 serialized_graph = fid.read()

AttributeError: module 'tensorflow' has no attribute 'GraphDef'

@Raymundo1

This comment has been minimized.

Copy link

commented Jul 8, 2019

Very good code. Is there a way that I can filter classes to use only person and car? I know their class values are 1 for person and 3 for car, but how could I possibly make this happen here in line 33:

self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')

change line 78 to the specific class you want

@arminf82

This comment has been minimized.

Copy link

commented Jul 31, 2019

Great code. thanks!

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.