Skip to content

Instantly share code, notes, and snippets.

View xiong-jie-y's full-sized avatar

Xiong Jie xiong-jie-y

  • Tokyo, Japan
View GitHub Profile
@xiong-jie-y
xiong-jie-y / log_reciever.py
Last active May 5, 2020 01:54
Sensor Logger
import zmq
import json
import argparse
import os
import datetime
def main():
context = zmq.Context()
socket = context.socket(zmq.REP)
import open3d as o3d
import numpy as np
import json
def get_first_landmarks_in_frames(path="face_landmark_frames_20200425-220448.json"):
    """Return the first detected face's landmarks for every frame in *path*.

    Parameters
    ----------
    path : str
        JSON file holding a list of frames, where each frame is a list of
        faces and each face is a dict with a 'landmarks' key (inferred from
        the indexing below — confirm against the recorder that wrote the file).

    Returns
    -------
    list[np.ndarray]
        One landmark array per frame (array shape depends on the data).
    """
    # `with` closes the file handle; the original leaked it via a bare open().
    with open(path, "r") as f:
        face_landmark_frames = json.load(f)
    # Bug fix: the original indexed face_landmark_frames[0][0] inside the
    # comprehension, yielding the FIRST frame's landmarks repeated once per
    # frame.  Index the loop variable so each frame contributes its own.
    return [np.array(frame[0]['landmarks']) for frame in face_landmark_frames]
first_landmarks = get_first_landmarks_in_frames()
@xiong-jie-y
xiong-jie-y / hand_face_tracking_desktop.pbtxt
Created April 21, 2020 15:34
Multi face and multi hand tracking with Mediapipe on Ubuntu
# MediaPipe graph that performs multi-hand tracking with TensorFlow Lite on GPU.
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/multihandtrackinggpu.
# Images coming into and out of the graph.
input_stream: "input_video"
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
@xiong-jie-y
xiong-jie-y / gist:debd100f71f550707897872774f5c0cd
Created April 21, 2020 15:34
Multi face and multi hand tracking with Mediapipe on Ubuntu
We couldn’t find that file to show.
@xiong-jie-y
xiong-jie-y / hand_face_tracking_desktop.pbtxt
Created April 21, 2020 15:32
Multi face and multi hand tracking using mediapipe on Ubuntu.
# MediaPipe graph that performs multi-hand tracking with TensorFlow Lite on GPU.
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/multihandtrackinggpu.
# Images coming into and out of the graph.
input_stream: "input_video"
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
@xiong-jie-y
xiong-jie-y / subscribe_to_detection.py
Created April 19, 2020 10:15
Detection Subscriber
import zmq
import sys
# Subscribe to detection messages published on tcp://localhost:5555.
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect("tcp://localhost:5555")

# Topic prefix to subscribe to: first CLI argument, defaulting to "Detection".
# Bug fix: the original assigned the whole sys.argv LIST (not sys.argv[1]),
# so zipfilter.encode('utf-8') raised AttributeError whenever an argument
# was passed on the command line.  (The old "zipcode being 10001" comment
# was stale boilerplate from the zguide weather example.)
zipfilter = sys.argv[1] if len(sys.argv) > 1 else "Detection"
subscriber.setsockopt(zmq.SUBSCRIBE, zipfilter.encode('utf-8'))
@xiong-jie-y
xiong-jie-y / HelloClient.cs
Last active April 13, 2020 13:31
Virtual Character Streaming in Ubuntu
using UnityEngine;
using VRM;
using AsyncIO;
using NetMQ;
using NetMQ.Sockets;
using MessagePack;
using System;
@xiong-jie-y
xiong-jie-y / stream_window_to_virtual_camera.py
Created April 12, 2020 08:07
Virtual Character Streaming in Ubuntu
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import numpy
def get_window_screen(window_id):
window = Gdk.get_default_root_window()
screen = window.get_screen()
typ = window.get_type_hint()
@xiong-jie-y
xiong-jie-y / stream_window_to_virtual_camera.py
Created April 12, 2020 08:07
Virtual Character Streaming in Ubuntu
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import numpy
def get_window_screen(window_id):
window = Gdk.get_default_root_window()
screen = window.get_screen()
typ = window.get_type_hint()
@xiong-jie-y
xiong-jie-y / stream_window_to_virtual_camera.py
Created April 12, 2020 08:07
Virtual Character Streaming in Ubuntu
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import numpy
def get_window_screen(window_id):
window = Gdk.get_default_root_window()
screen = window.get_screen()
typ = window.get_type_hint()