-
-
Save anonymous/b45c4f062ac6c3dfaf9c to your computer and use it in GitHub Desktop.
The visualization source code
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#include <opencv2/highgui/highgui.hpp> | |
#include <opencv2/imgproc/imgproc.hpp> | |
#include <opencv2/core/core.hpp> | |
#include <emc/io.h> | |
#include <emc/rate.h> | |
#include <ros/ros.h> | |
#include <iostream> | |
#include <stdio.h> | |
#include <math.h> | |
#include <sstream> | |
#include <geometry_msgs/Point.h> | |
#include "../global/classes.h" | |
#include "../skill_context/hough_detect.h" | |
double resolution = 0.01; // resolution of the canvas -- presumably meters per pixel, consumed by worldToCanvas (project helper); confirm
cv::Point2d size = cv::Point2d(500,500); // size of the canvas in pixels (width, height)
std::string window_text = "init"; // debug text drawn in the visualization window (bottom-right corner)
go_to setpoint; // latest setpoint received from the robot's rostopic; written by setpointCallback, read by the draw loop
/* setpointCallback() | |
* ACT: This function is called when a message is received | |
* on the setpoint rostopic. | |
* | |
* IN: - Geometry message setpoint | |
* | |
* OUT: - Void | |
*/ | |
void setpointCallback(const geometry_msgs::Point::ConstPtr& msg) | |
{ | |
setpoint.x = msg->x; | |
setpoint.y = msg->y; | |
} | |
/* Visualization()
 * ACT: The visualization displays what the robot sees and decides.
 * It calls the vital functions of the program on which decisions are
 * made: the Hough lines, the found junctions and the dead ends are all
 * plotted. Furthermore it subscribes to the rostopic "setpoint" so the
 * current setpoint of the robot is visible. Rostopics are used so that
 * even during experiments the setpoint can be displayed in a
 * visualization running on another computer.
 * This visualization is built on the emc-viz work of Sjoerd van den Dries.
 */
int main(int argc, char **argv)
{
    emc::IO io;          // EMC robot I/O handle (provides laser data access)
    emc::LaserData scan; // buffer for the most recent laser scan
    emc::Rate r(10);     // loop rate limiter: 10 Hz
    // NOTE(review): ros::init runs after emc::IO is constructed -- if emc::IO
    // itself initializes ROS this is redundant; confirm against the emc library.
    ros::init(argc, argv, "group8_viz");
    // This is the nodehandler initialized to receive data that the robot publishes,
    // setpointCallback is run when data is received.
    ros::NodeHandle nh2;
    ros::Subscriber sub_setpoint = nh2.subscribe("/pico/group8/setpoint", 10, setpointCallback);
    while(io.ok())
    {
        ros::spinOnce(); // check if new data is published on the rostopic (dispatches setpointCallback)
        cv::Mat canvas(size.x, size.y, CV_8UC3, cv::Scalar(0, 0, 0)); // fresh black canvas each frame; this defines the window size of the visualization
        if (!io.readLaserData(scan))
            continue; // no new laser scan available yet -- skip this frame
        // Draw the LRF data points: convert each beam from polar (angle, range)
        // to Cartesian robot-frame coordinates and plot it as a green pixel.
        double a = scan.angle_min;
        for(unsigned int i = 0; i < scan.ranges.size(); ++i)
        {
            double x = cos(a) * scan.ranges[i];
            double y = sin(a) * scan.ranges[i];
            cv::Point2d p = worldToCanvas(x, y); // project helper: robot/world coords -> canvas pixels (presumably meters -> px; confirm)
            if (p.x >= 0 && p.y >= 0 && p.x < canvas.cols && p.y < canvas.rows) // clip to canvas bounds
                canvas.at<cv::Vec3b>(p) = cv::Vec3b(0, 255, 0);
            a += scan.angle_increment;
        }
        // Draw the setpoint, published by the robot (red filled dot)
        cv::Point2d s = worldToCanvas(setpoint.x, setpoint.y);
        circle(canvas, s, 3, CV_RGB(255,0,0), -1);
        // Draw the robot footprint as a closed polyline through the points below
        cv::Scalar robot_color(0, 0, 255);
        std::vector<std::pair<double, double> > robot_points;
        robot_points.push_back(std::pair<double, double>( 0.1, -0.2));
        robot_points.push_back(std::pair<double, double>( 0.1, -0.1));
        robot_points.push_back(std::pair<double, double>( 0.05, -0.1));
        robot_points.push_back(std::pair<double, double>( 0.05, 0.1));
        robot_points.push_back(std::pair<double, double>( 0.1, 0.1));
        robot_points.push_back(std::pair<double, double>( 0.1, 0.2));
        robot_points.push_back(std::pair<double, double>(-0.1, 0.2));
        robot_points.push_back(std::pair<double, double>(-0.1, -0.2));
        for(unsigned int i = 0; i < robot_points.size(); ++i)
        {
            unsigned int j = (i + 1) % robot_points.size(); // wrap around to close the outline
            cv::Point2d p1 = worldToCanvas(robot_points[i].first, robot_points[i].second);
            cv::Point2d p2 = worldToCanvas(robot_points[j].first, robot_points[j].second);
            cv::line(canvas, p1, p2, robot_color, 2);
        }
        // Call houghdetect() to draw the hough lines in the canvas.
        // Each Vec6d appears to encode a line segment with endpoints in
        // elements [0][1] and [2][3] -- confirm against hough_detect.h.
        std::vector<cv::Vec6d> clean_lines = houghdetect(scan, size);
        for( size_t i = 0; i < clean_lines.size(); i++ )
        {
            cv::Point2d P1 = worldToCanvas2(clean_lines[i][0], clean_lines[i][1]); // worldToCanvas2: alternate projection helper; semantics not visible here
            cv::Point2d P2 = worldToCanvas2(clean_lines[i][2], clean_lines[i][3]);
            cv::line( canvas, P1, P2, cv::Scalar(255,0,0), 3, 8 );
            cv::circle(canvas, P1, 3, CV_RGB(255,0,0), -1);   // red: segment start
            cv::circle(canvas, P2, 3, CV_RGB(255,255,0), -1); // yellow: segment end
        }
        // Put in the debug text (fixed position near the bottom-right of the 500x500 canvas)
        cv::putText(canvas, window_text, cv::Point2d(450,450), 1, 1.0, CV_RGB(255,255,255));
        // Draw junction setpoints, labeled with their index
        std::vector<Vec2d> corr_sp;
        corr_sp = avg_calc_junction(io, size); // project helper: detected junction setpoints
        for (size_t i = 0; i < corr_sp.size(); i++)
        {
            cv::Point2d P = worldToCanvas2(corr_sp[i][0], corr_sp[i][1]);
            std::string locline;
            stringstream ss (stringstream::in | stringstream::out);
            ss << i;
            locline = ss.str();
            cv::putText(canvas,locline, P, 1, 1.0, CV_RGB(255,0,255));
        }
        // Draw junction dead ends, labeled "DE <index>"
        std::vector<Vec2d> dead_sp;
        dead_sp = deadend_detect(scan, size); // project helper: detected dead-end positions
        for (size_t i = 0; i < dead_sp.size(); i++)
        {
            cv::Point2d P = worldToCanvas2(dead_sp[i][0], dead_sp[i][1]);
            std::string locline;
            stringstream ss (stringstream::in | stringstream::out);
            ss << "DE " << i;
            locline = ss.str();
            cv::putText(canvas,locline, P, 1, 1.0, CV_RGB(255,0,255));
        }
        // Show the finished frame and wait briefly so the GUI can process events
        cv::imshow("PICO Viz", canvas);
        cv::waitKey(3);
        r.sleep(); // keep the loop at 10 Hz
    }
    return 0;
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment