/*
MIT License
Copyright 2021 Stefan Novak
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
import { PanelExtensionContext, RenderState } from "@foxglove/studio";
// Babylon.js is used for overlaying a wireframe computed by pose estimation telemetry over camera sensor images.
import { Engine, Scene, Layer, DynamicTexture, UniversalCamera, Vector3, Quaternion, SceneLoader, Color3,
StandardMaterial, Axis, Nullable, TransformNode } from "@babylonjs/core";
// Babylon.js provides a library for loading mesh data in glTF format, a file used for representing objects in a 3D
// scene. This file format is used for loading the spacecraft 3D model and rendering it as a wireframe.
import "@babylonjs/loaders/glTF";
import ReactDOM from "react-dom";
import { useLayoutEffect, useRef } from "react";
// Webpack will use its internal JSON loader to ready the glTF data for the spacecraft model and embed that data as an
// object in the compiled JavaScript module. This allows us to directly embed spacecraft model geometry in our source
// code without having to statically serve the file -- which isn't readily supported in Foxglove Studio extensions yet.
import spacecraft_model from "./assets/spacecraft_model.gltf.json";
/**
 * Foxglove Studio panel that overlays a wireframe of the spacecraft's 3D model
 * (driven by pose-estimation telemetry) on top of live camera sensor images.
 *
 * Subscribes to two ROS topics via the panel extension context:
 *   - camera images, blitted onto a full-screen background layer, and
 *   - pose estimates, applied to the transform node parenting the wireframe.
 */
function PoseVisualizationPanel({ context }: { context: PanelExtensionContext }): JSX.Element {
  const canvas = useRef<HTMLCanvasElement>(null);

  // The ROS topics used for subscribing to camera images and pose estimates.
  const cameraTopicName = "/freeflyer/camera";
  const poseTopicName = "/freeflyer/pose";

  // Minimal shapes of the ROS messages this panel reads. Only the fields
  // actually used below are declared; the real messages carry more.
  // NOTE(review): assumes the camera topic carries PNG-compressed bytes in
  // `data` (see the image/png Blob below) — confirm against the publisher.
  type ImageMessage = { data: BlobPart };
  type PoseMessage = {
    pose: {
      position: { x: number; y: number; z: number };
      orientation: { x: number; y: number; z: number; w: number };
    };
  };

  useLayoutEffect(() => {
    // Engine/scene lifetime is tied to this effect: created on mount and
    // disposed in the cleanup function below. Keeping them as effect-local
    // consts (rather than component-scope `let`s) avoids stale bindings
    // surviving across re-renders.
    const engine = new Engine(canvas.current, true);
    const scene = new Scene(engine);

    // A transform node is used as the parent node for the imported mesh. By modifying its position and orientation
    // from pose estimation telemetry, the wireframe mesh will be positioned correctly.
    const transformNode: Nullable<TransformNode> = new TransformNode("transform", scene);

    const camera = new UniversalCamera("camera", Vector3.Zero(), scene);
    // TODO: receive this from a sensor_msgs/CameraInfo message.
    camera.fov = 0.785398;
    camera.setTarget(new Vector3(0, 0, -1));

    // The camera sensor image message received from ROS will be used to update a DynamicTexture for a Layer.
    const cameraImageLayer = new Layer("cameraImage", null, scene, true);
    // TODO: receive image width/height from a sensor_msgs/CameraInfo message. Also hook up image resizer logic.
    const cameraImageTexture = new DynamicTexture("cameraImage", { width: 800, height: 600 }, scene, false);
    cameraImageLayer.texture = cameraImageTexture;
    const cameraImageTextureContext = cameraImageTexture.getContext();

    // Load the glTF model that Webpack embedded as a JSON object; the `data:`
    // URL form lets SceneLoader consume it without a statically-served file.
    // Every imported mesh is rendered as a green wireframe.
    SceneLoader.ImportMesh("", "", `data:${JSON.stringify(spacecraft_model)}`, scene, (meshes) => {
      const root = meshes[0];
      if (root) {
        root.parent = transformNode;
        // Rotate the model's axes into Babylon's coordinate frame.
        root.rotate(Axis.X, Math.PI / 2);
        root.rotate(Axis.Y, Math.PI / 2);
      }
      meshes.forEach((mesh, index) => {
        const material = new StandardMaterial(`spacecraft_model-mesh-${index}`, scene);
        material.wireframe = true;
        material.emissiveColor = Color3.Green();
        mesh.material = material;
      });
    });

    // Register the render handler and subscriptions immediately rather than
    // inside the ImportMesh callback: a model-load failure would otherwise
    // leave the panel permanently blank and unsubscribed. Pose frames that
    // arrive before the mesh finishes loading are handled by the
    // `transformNode` guard below.
    context.onRender = async (renderState: RenderState, done: () => void) => {
      // Index this frame's messages by topic for direct lookup below.
      const messages: Record<string, unknown> = {};
      renderState.currentFrame?.forEach(({ topic, message }) => {
        messages[topic] = message;
      });

      if (cameraTopicName in messages) {
        // Decode the PNG bytes asynchronously and blit the bitmap onto the
        // background layer's dynamic texture.
        const image = messages[cameraTopicName] as ImageMessage;
        const cameraImageBlob = new Blob([image.data], { type: "image/png" });
        const cameraImageBitmap = await createImageBitmap(cameraImageBlob);
        cameraImageTextureContext.drawImage(cameraImageBitmap, 0, 0);
        cameraImageBitmap.close();
        cameraImageTexture.update();
      }

      if (poseTopicName in messages) {
        const { pose } = messages[poseTopicName] as PoseMessage;
        // The sensor body frame coordinate system is different than BabylonJS's coordinate system, so
        // apply a transform on-the-fly.
        const position = new Vector3(
          -pose.position.y,
          pose.position.x,
          -pose.position.z,
        );
        const orientation = new Quaternion(
          pose.orientation.y,
          -pose.orientation.x,
          pose.orientation.z,
          pose.orientation.w,
        );
        if (transformNode) {
          transformNode.rotationQuaternion = orientation;
          transformNode.position = position;
        }
      }

      // Only signal Foxglove that the frame is complete after Babylon has
      // actually drawn it.
      scene.afterRender = done;
      scene.render();
    };
    context.watch("currentFrame");
    context.subscribe([poseTopicName, cameraTopicName]);

    // Release GPU resources on unmount; disposing the engine also disposes
    // the scene and everything created within it (textures, materials, ...).
    return () => {
      engine.dispose();
    };
    // `context` is stable for the lifetime of a panel, so mounting once is intended.
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  return <canvas ref={canvas} width={800} height={600} />;
}
/**
 * Extension entry point invoked by Foxglove Studio: mounts the pose
 * visualization panel into the DOM element Studio provides for it.
 */
export function initPoseVisualizationPanel(context: PanelExtensionContext) {
  const panel = <PoseVisualizationPanel context={context} />;
  ReactDOM.render(panel, context.panelElement);
}