
@dreiss
Created April 27, 2020 17:59
diff --git c/fb_sol_v1/ocr_lib.py w/fb_sol_v1/ocr_lib.py
index 7f71e92..cd72b0f 100644
--- c/fb_sol_v1/ocr_lib.py
+++ w/fb_sol_v1/ocr_lib.py
@@ -32,14 +32,17 @@ class OCRLib:
         )
 
     def process_rgb_image(self, input_image):
-        boxes, scores = self.detector.detect(input_image)
+        with torch.autograd.profiler.record_function("detect"):
+            boxes, scores = self.detector.detect(input_image)
         results = []
-        for box, score in zip(boxes, scores):
-            result = self.recognizer.recognize(input_image, box)
-            result["box"] = box.tolist()
-            result["det_scr"] = score
-            results.append(result)
+        with torch.autograd.profiler.record_function("recognition"):
+            for box, score in zip(boxes, scores):
+                with torch.autograd.profiler.record_function("recognize"):
+                    result = self.recognizer.recognize(input_image, box)
+                result["box"] = box.tolist()
+                result["det_scr"] = score
+                results.append(result)
         return results
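
For context, below is a minimal standalone sketch (not part of the patch) of the record_function pattern used in this hunk. The function and section names are made up for illustration; named sections only show up in profiler output while a torch.autograd.profiler.profile() context is active.

import torch

def process(items):
    # Nested named sections, mirroring the "recognition"/"recognize" nesting above.
    with torch.autograd.profiler.record_function("outer-loop"):
        out = []
        for item in items:
            with torch.autograd.profiler.record_function("inner-step"):
                out.append(item * 2)
        return out

with torch.autograd.profiler.profile() as prof:
    process(list(torch.arange(4)))

# Aggregate table of the named sections plus the underlying operator events.
print(prof.key_averages().table(sort_by="cpu_time_total"))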
diff --git c/fb_sol_v1/sample_solution.py w/fb_sol_v1/sample_solution.py
index ee48fdc..a83a5ce 100644
--- c/fb_sol_v1/sample_solution.py
+++ w/fb_sol_v1/sample_solution.py
@@ -10,6 +10,7 @@ import sys
 import cv2
 from ocr_lib import OCRLib
 import time
+import torch
 
 def parse_args():
     parser = argparse.ArgumentParser(
@@ -84,8 +85,11 @@ def process_video(input_video, cfg_fn, results_path, sampling_rate):
     success = True
     cnt = 0
     while success:
-        success, img = vidcap.read()
+        with torch.autograd.profiler.record_function("vidcap.read"):
+            success, img = vidcap.read()
         cnt += 1
+        if cnt > 1:
+            break
         if not success:
             break
@@ -98,10 +102,12 @@ def process_video(input_video, cfg_fn, results_path, sampling_rate):
         # if os.path.exists(frm_output):
         #     continue
 
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-        # resizing if that could be helpful
-        # img = cv2.resize(img, (640, 480), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
-        results = reader.process_rgb_image(img)
+        with torch.autograd.profiler.record_function("preprocess"):
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+            # resizing if that could be helpful
+            # img = cv2.resize(img, (640, 480), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
+        with torch.autograd.profiler.record_function("main-process"):
+            results = reader.process_rgb_image(img)
 
         # Dump bounding Box information
         with open(frm_output, "w") as h_out:
@@ -166,10 +172,13 @@ def main():
     print("OCR Start!")
     start = time.time()
     # query_video(**vars(args))
-    query_video(input_video=sys.argv[1], query_file=sys.argv[2], results_path=results_path)
+    with torch.autograd.profiler.profile() as prof:
+        with torch.autograd.profiler.record_function("whole-program"):
+            query_video(input_video=sys.argv[1], query_file=sys.argv[2], results_path=results_path)
+    prof.export_chrome_trace("/tmp/ocr_prof.json")
     print('ORC Done!')
     end = time.time()
     print("Total time: {} second ".format(end - start))
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
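
The profile() / export_chrome_trace() combination added above writes /tmp/ocr_prof.json in Chrome trace format, which can be opened in chrome://tracing to see "whole-program", "vidcap.read", "preprocess", "main-process", "detect", and "recognize" as nested timeline slices. A hedged, self-contained sketch of the same pattern, with a placeholder workload standing in for query_video():

import torch

def workload():
    # Placeholder for query_video(); any CPU work shows up in the trace.
    with torch.autograd.profiler.record_function("whole-program"):
        torch.randn(512, 512).mm(torch.randn(512, 512))

with torch.autograd.profiler.profile() as prof:
    workload()

prof.export_chrome_trace("/tmp/ocr_prof.json")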
diff --git c/fb_sol_v1/tracing.py w/fb_sol_v1/tracing.py
new file mode 100644
index 0000000..0d60a28
--- /dev/null
+++ w/fb_sol_v1/tracing.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+import os
+import sys
+import json
+import time
+import contextlib
+from queue import Queue
+
+
+@contextlib.contextmanager
+def trace(out_file, clock=None):
+    with contextlib.ExitStack() as handles:
+        if isinstance(out_file, str):
+            out_file = open(out_file, "w")
+            handles.enter_context(out_file)
+
+        tracer = Tracer(out_file, clock or time.monotonic)
+        try:
+            yield tracer
+        finally:
+            tracer.shutdown()
+
+
+class Tracer:
+    def __init__(self, handle, clock):
+        self.handle = handle
+        self.clock = clock
+        self.pid = os.getpid()
+        self.handle.write('[')
+
+    @contextlib.contextmanager
+    def section(self, name):
+        event = dict(
+            name=name,
+            #cat=func_filename, # Event Category.
+            #tid=self.thread_id, # Thread ID.
+            ph="B", # Event Type.
+            pid=self.pid, # Process ID.
+            ts=self.clock()*1000000, # Timestamp.
+        )
+        self.handle.write(json.dumps(event))
+        self.handle.write(",\n")
+        try:
+            yield
+        finally:
+            event = dict(
+                name=name,
+                #cat=func_filename, # Event Category.
+                #tid=self.thread_id, # Thread ID.
+                ph="E", # Event Type.
+                pid=self.pid, # Process ID.
+                ts=self.clock()*1000000, # Timestamp.
+            )
+            self.handle.write(json.dumps(event))
+            self.handle.write(",\n")
+
+    def shutdown(self):
+        self.handle.write('{}]') # empty {} so the final entry doesn't end with a comma
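
Nothing in the patch calls the new tracing.py yet. Below is a hedged usage sketch, assuming the module is importable as tracing from the working directory; the sleep calls are placeholders for real work.

import time
from tracing import trace  # the new fb_sol_v1/tracing.py

with trace("/tmp/manual_trace.json") as tracer:
    with tracer.section("read-frame"):
        time.sleep(0.05)   # placeholder for vidcap.read()
    with tracer.section("process-frame"):
        time.sleep(0.10)   # placeholder for reader.process_rgb_image()

# Each section is written as a "B"/"E" event pair with microsecond timestamps;
# the resulting JSON should load in chrome://tracing like the autograd trace.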