Skip to content

Instantly share code, notes, and snippets.

@angus-g
Created May 21, 2020 07:08
Show Gist options
  • Save angus-g/ba8543bf87514c3d126b711e2b3365f0 to your computer and use it in GitHub Desktop.
import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
import cv2
import numpy as np
from fluid import Fluid
# Webcam capture configuration: open the V4L2 device and request a fixed
# resolution and frame rate (the driver may silently ignore unsupported values).
camera_dev = "/dev/video0"
camera_width = 640
camera_height = 480
camera_fps = 30
cap = cv2.VideoCapture(camera_dev)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
cap.set(cv2.CAP_PROP_FPS, camera_fps)
def impl_glfw_init():
    """Initialise GLFW and create an OpenGL 3.3 core-profile window.

    Returns:
        The created GLFW window handle, with its context made current.

    Exits the process with status 1 if GLFW or the window cannot be
    initialised.
    """
    if not glfw.init():
        print("Could not initialise OpenGL context")
        exit(1)

    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    # forward-compat is required for core profiles on macOS
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)

    window = glfw.create_window(800, 800, "cam control", None, None)
    # BUG FIX: validate the window *before* making its context current --
    # the original called glfw.make_context_current(window) first, which
    # crashes on a failed (None) window instead of printing the error.
    if not window:
        glfw.terminate()
        print("Could not initialise window")
        exit(1)
    glfw.make_context_current(window)

    return window
# Set up Dear ImGui, the application window, and the GLFW renderer backend.
imgui.create_context()
window = impl_glfw_init()
impl = GlfwRenderer(window)

# Texture that the webcam/fluid frames are uploaded into every frame.
tex = gl.glGenTextures(1)
gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
# BUG FIX: the original set GL_TEXTURE_WRAP_S twice and never set WRAP_T;
# clamp both axes to the border colour.
gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameter(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
# opaque black border for samples outside the texture
gl.glTexParameter(
    gl.GL_TEXTURE_2D,
    gl.GL_TEXTURE_BORDER_COLOR,
    np.array([0.0, 0.0, 0.0, 1.0], dtype="float32"),
)
subtractor = cv2.createBackgroundSubtractorMOG2()  # background subtracting model
selected = 0  # index of the image to display: live / mask / flow / fluid
# initial mask: prime the background model with one frame so the first
# optical-flow computation in the main loop has a previous mask to compare against
_, frame = cap.read()
prev_mask = subtractor.apply(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# morphological opening with a 3x3 kernel removes speckle noise from the mask
prev_mask = cv2.morphologyEx(prev_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
# initialise fluid sim
channels = ("r", "g", "b")  # one advected quantity per colour channel
fluid_dims = (100, 100)  # fluid grid resolution (adjustable from the GUI)
visc_power = -3  # viscosity = 10 ** visc_power
blob_size = 0.01
velocity_scale = 20  # scaling from optical-flow velocity to fluid velocity
fluid = Fluid(fluid_dims, 10 ** visc_power, channels)
# Main loop: read a webcam frame, update the background mask and optical
# flow, drive the fluid simulation from the flow field, and draw the GUI.
while not glfw.window_should_close(window):
    glfw.poll_events()
    impl.process_inputs()

    fluid.advect_diffuse()

    # read webcam data
    _, frame = cap.read()
    # fake webcam expects RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # foreground mask, denoised with a 3x3 morphological opening
    mask = subtractor.apply(frame)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))

    # dense optical flow between the previous and current masks
    flow = cv2.calcOpticalFlowFarneback(prev_mask, mask, None, 0.5, 3, 13, 3, 5, 1.2, 0)
    prev_mask = mask

    # build gui
    imgui.new_frame()

    if imgui.begin_main_menu_bar():
        if imgui.begin_menu("File", True):
            clicked_quit, selected_quit = imgui.menu_item(
                "Quit", "Ctrl+Q", False, True,
            )
            if clicked_quit:
                exit(1)
            imgui.end_menu()
        imgui.end_main_menu_bar()

    imgui.begin("controls")
    imgui.columns(2)
    _, selected = imgui.listbox("display", selected, ["live", "mask", "flow", "fluid"])
    imgui.next_column()
    dims_changed, fluid_dims = imgui.input_int2(
        "Fluid resolution", *fluid_dims, flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE
    )
    visc_changed, visc_power = imgui.drag_float("log10(visc)", visc_power, 0.05, -6, 0,)
    _, blob_size = imgui.drag_float("blob", blob_size, 0.01, 0.01, 1)
    _, velocity_scale = imgui.input_float("vel scale", velocity_scale)
    snapshot = imgui.button("snapshot")
    imgui.columns(1)
    imgui.end()

    # apply control changes from the GUI
    if dims_changed:
        # rebuild the simulation at the new resolution, keeping viscosity
        fluid = Fluid(fluid_dims, fluid.viscosity, channels)
    if visc_changed:
        fluid.viscosity = 10 ** visc_power
    if snapshot:
        # resize frame to fluid size and import it as the fluid dye,
        # resetting the velocity field
        data = cv2.resize(frame, tuple(fluid_dims))
        for i, c in enumerate(channels):
            fluid.quantities[c] = data[..., i].reshape(-1) / 255
        fluid.velocity_field[:] = 0

    # choose which image to display
    if selected == 0:
        # live
        data = frame
    elif selected == 1:
        # mask (single channel broadcast to RGB)
        data = mask[..., None].repeat(3, axis=-1)
    elif selected == 2:
        # flow, visualised as hue = direction, value = magnitude
        hsv = np.zeros_like(frame)
        hsv[..., 1] = 255
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        data = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    elif selected == 3:
        # fluid dye, converted to uint8 and scaled up to camera resolution
        data = np.dstack(tuple(fluid.quantities[c] for c in channels))
        data = data.reshape((fluid_dims[0], fluid_dims[1], 3))
        data = np.clip(data, 0, 1) * 255
        data = data.astype("uint8")
        data = cv2.resize(data, (camera_width, camera_height))

    # upload frame as texture data
    gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
    gl.glTexImage2D(
        gl.GL_TEXTURE_2D,
        0,
        gl.GL_RGB,
        camera_width,
        camera_height,
        0,
        gl.GL_RGB,
        gl.GL_UNSIGNED_BYTE,
        data,
    )

    # display data
    imgui.begin("live feed")
    io = imgui.get_io()
    image_p = imgui.get_cursor_screen_pos()
    imgui.image_button(tex, camera_width, camera_height, frame_padding=0)
    hov = imgui.is_item_hovered()
    # normalised mouse position over the image, for debugging interaction
    # BUG FIX: the original passed a stray extra argument (5) to format()
    imgui.text(
        "x: {:.3}, y: {:.3}, hov: {}".format(
            (io.mouse_pos[0] - image_p[0]) / camera_width,
            (io.mouse_pos[1] - image_p[1]) / camera_height,
            hov,
        )
    )
    imgui.end()

    # inject the optical-flow field into the fluid velocity and re-project
    # to keep the velocity field divergence-free
    flow = cv2.resize(flow, tuple(fluid_dims))
    fluid.velocity_field += velocity_scale * flow.reshape(-1, 2) / 255
    fluid.project()

    gl.glClearColor(1.0, 1.0, 1.0, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT)

    imgui.render()
    impl.render(imgui.get_draw_data())
    glfw.swap_buffers(window)

# BUG FIX: release the camera alongside the GUI teardown (resource leak)
cap.release()
impl.shutdown()
glfw.terminate()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment