# MediaPipe graph that performs face mesh with TensorFlow Lite on CPU.
# Input image. (ImageFrame)
input_stream: "input_video"
# Output image with rendered results. (ImageFrame)
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"
# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph
# to 1. This prevents the downstream nodes from queuing up incoming images and
# data excessively, which leads to increased latency and memory usage, unwanted
# in real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}
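# The in-flight limit defaults to 1 but can be raised through the calculator's
# options; a hedged sketch (assuming the max_in_flight field from
# mediapipe/calculators/core/flow_limiter_calculator.proto):
# node_options: {
#   [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions]: {
#     max_in_flight: 2  # allow two images in flight instead of one
#   }
# }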
# Generates side packet containing max number of faces to detect/track.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
    }
  }
}
# Generates side packet containing max number of hands to detect/track.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_hands"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 2 }
    }
  }
}
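# Note: a single ConstantSidePacketCalculator can emit several side packets via
# indexed PACKET tags, so the two nodes above could be merged. A hedged sketch
# (packet order must match the output_side_packet order):
# node {
#   calculator: "ConstantSidePacketCalculator"
#   output_side_packet: "PACKET:0:num_faces"
#   output_side_packet: "PACKET:1:num_hands"
#   node_options: {
#     [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
#       packet { int_value: 1 }
#       packet { int_value: 2 }
#     }
#   }
# }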
# Detects/tracks hand landmarks.
#node {
# calculator: "HandLandmarkTrackingCpu"
# input_stream: "IMAGE:input_video"
# input_side_packet: "NUM_HANDS:num_hands"
# output_stream: "LANDMARKS:hand_landmarks"
# output_stream: "HANDEDNESS:handedness"
# output_stream: "PALM_DETECTIONS:multi_palm_detections"
# output_stream: "HAND_ROIS_FROM_LANDMARKS:multi_hand_rects"
# output_stream: "HAND_ROIS_FROM_PALM_DETECTIONS:multi_palm_rects"
#}
# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}
# Subgraph that renders face-landmark annotation onto the input image.
node {
  calculator: "RendererCustom"
  input_stream: "LANDMARKS:multi_face_landmarks"
  #input_stream: "LANDMARKS:hand_landmarks"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "NORM_RECTS:0:face_rects_from_landmarks"
  #input_stream: "NORM_RECTS:1:multi_palm_rects"
  #input_stream: "NORM_RECTS:2:multi_hand_rects"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}
# MediaPipe face mesh rendering subgraph. What follows is a second
# CalculatorGraphConfig; it normally lives in its own .pbtxt file and is
# registered under the name "RendererCustom" (e.g. via a
# mediapipe_simple_subgraph BUILD rule) so the node above can invoke it.
type: "RendererCustom"
# CPU image. (ImageFrame)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_face_landmarks"
# Regions of interest calculated based on face landmarks.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:rects"
# Detected faces. (std::vector<Detection>)
input_stream: "DETECTIONS:detections"
# CPU image with rendered data. (ImageFrame)
output_stream: "IMAGE:output_image"
# Converts detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:detections"
  output_stream: "RENDER_DATA:detections_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 4.0
      color { r: 0 g: 255 b: 0 }
    }
  }
}
# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
  input_stream: "ITERABLE:multi_face_landmarks"
  output_stream: "ITEM:face_landmarks"
  output_stream: "BATCH_END:landmark_timestamp"
}
# Converts landmarks to drawing primitives for annotation overlay.
node {
  calculator: "FaceLandmarksToRenderDataCalculator"
  input_stream: "NORM_LANDMARKS:face_landmarks"
  output_stream: "RENDER_DATA:landmarks_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] {
      landmark_color { r: 234 g: 5 b: 255 }
      connection_color { r: 100 g: 100 b: 100 }
      thickness: 2
      visualize_landmark_depth: false
    }
  }
}
# Collects a RenderData object for each face into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of RenderData at the BATCH_END
# timestamp.
node {
  calculator: "EndLoopRenderDataCalculator"
  input_stream: "ITEM:landmarks_render_data"
  input_stream: "BATCH_END:landmark_timestamp"
  output_stream: "ITERABLE:multi_face_landmarks_render_data"
}
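# The same Begin/End loop pairing exists for other element types; a hedged
# sketch for a std::vector<Detection> input (assuming the
# BeginLoopDetectionCalculator typedef registered in
# mediapipe/calculators/core/begin_loop_calculator.cc):
# node {
#   calculator: "BeginLoopDetectionCalculator"
#   input_stream: "ITERABLE:multi_detections"
#   output_stream: "ITEM:detection"
#   output_stream: "BATCH_END:detections_timestamp"
# }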
# Converts normalized rects to drawing primitives for annotation overlay.
node {
  calculator: "RectToRenderDataCalculator"
  input_stream: "NORM_RECTS:rects"
  output_stream: "RENDER_DATA:rects_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] {
      filled: false
      color { r: 255 g: 0 b: 0 }
      thickness: 4.0
    }
  }
}
# Draws the annotations. With the IMAGE input commented out, the calculator
# renders onto a blank canvas of the configured size rather than on top of the
# input image.
node {
  calculator: "AnnotationOverlayCalculator"
  # input_stream: "IMAGE:input_image"
  input_stream: "detections_render_data"
  input_stream: "VECTOR:multi_face_landmarks_render_data"
  input_stream: "rects_render_data"
  output_stream: "IMAGE:output_image"
  node_options: {
    [type.googleapis.com/mediapipe.AnnotationOverlayCalculatorOptions] {
      canvas_width_px: 1600
      canvas_height_px: 1200
    }
  }
}