Skip to content

Instantly share code, notes, and snippets.

@soulslicer
Created July 2, 2016 17:44
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save soulslicer/871923034ef435098c6b5290bb28b99d to your computer and use it in GitHub Desktop.
calib sonar
#!/usr/bin/python
import sys
sys.dont_write_bytecode = True
# Base libraries
import cv2
import time
import string
import json
import os
import numpy as np
from pprint import pprint
# ROS
import roslib
import rospy
import tf
from std_msgs.msg import String
from sensor_msgs.msg import Image
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Imu
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from nav_msgs.msg import Odometry
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import *
from dynamic_reconfigure.server import Server
from dynamic_reconfigure.client import Client
# Others
from Settings import *
from Ros import *
from Algorithms import *
from sonar.msg import *
class ParticleFilterCalibrate():
    """Particle filter that estimates the 6-DOF sonar-to-camera extrinsic
    transform.

    Each particle is one candidate transform: rows 0-2 of ``State`` are the
    translation (tx, ty, tz) and rows 3-5 the rotation (roll, pitch, yaw).
    Units are presumably metres and degrees, matching
    ``Transforms.getTransformMatrix`` -- TODO confirm against Transforms.
    Particles are scored by re-projecting known camera-frame target points
    into the sonar image and comparing pixel intensity to the expected
    sonar return colour.
    """
    def __init__(self, Sonar_resolution=Transforms.sonarResolution, Camera_resolution=Transforms.cameraResolution, Npop_particles=300):
        """Initialise the filter.

        Sonar_resolution / Camera_resolution: image sizes (defaults from
        the project-wide Transforms configuration).
        Npop_particles: number of particles in the population.
        """
        # Set Resolutions and particle counts
        self.Sonar_resolution = Sonar_resolution
        self.Camera_resolution = Camera_resolution
        self.Npop_particles = Npop_particles
        # Set Noise levels and color levels (User must set these)
        self.Xstd_t= 0.3                 # translation diffusion std-dev per step
        self.Xstd_r= 3.0                 # rotation diffusion std-dev per step
        self.Xstd_sonar_color = 10       # intensity-likelihood std-dev (sonar)
        self.Xstd_camera_color = 15      # intensity-likelihood std-dev (camera; currently unused in scoring)
        self.Sonar_color = 255.          # expected intensity of a sonar return at a target pixel
        self.Camera_color = 0.           # expected camera intensity (currently unused in scoring)
        self.Count = 0                   # number of update_particles() calls so far
        # Data
        self.SonarPixels = None
        self.CameraPixels = None
        self.StateMean = None
        self.StateCovar = None
        self.State = None
        self.SonarPixelsMean = None
        self.CameraPixelsMean = None
        self.dataSet = None              # list of {"SonarImage", "CameraImage", "CameraState"} dicts; caller must set
        # Velocity update
        # NOTE(review): identity transition matrix -- defined but never applied
        # in this class; kept presumably for a future motion model.
        self.F_update = np.matrix([
            [1, 0, 0, 0, 0, 0], #TX
            [0, 1, 0, 0, 0, 0], #TY
            [0, 0, 1, 0, 0, 0], #TZ
            [0, 0, 0, 1, 0, 0], #TRoll
            [0, 0, 0, 0, 1, 0], #TPitch
            [0, 0, 0, 0, 0, 1], #TYaw
        ])
        # Initialize System: 6 x Npop_particles matrix of particles drawn
        # around the zero transform with the configured std-devs.
        self.State = np.asarray([
            0.0 + self.Xstd_t * np.random.randn(self.Npop_particles),
            0.0 + self.Xstd_t * np.random.randn(self.Npop_particles),
            0.0 + self.Xstd_t * np.random.randn(self.Npop_particles),
            0.0 + self.Xstd_r * np.random.randn(self.Npop_particles),
            0.0 + self.Xstd_r * np.random.randn(self.Npop_particles),
            0.0 + self.Xstd_r * np.random.randn(self.Npop_particles),
        ])
    def update_particles(self):
        """Diffusion step: jitter every particle with Gaussian noise."""
        # Update count
        self.Count = self.Count + 1
        # Add noise (translation rows and rotation rows use separate std-devs)
        self.State[0:3,:] = self.State[0:3,:] + self.Xstd_t * np.random.randn(3,self.Npop_particles)
        self.State[3:6,:] = self.State[3:6,:] + self.Xstd_r * np.random.randn(3,self.Npop_particles)
    def calc_log_likelihood(self):
        """Score every particle against self.dataSet, filling self.Weights
        (1 x Npop_particles) with summed log-likelihoods.

        For each particle and each recorded frame, the stored camera-frame
        target points are transformed into the sonar frame via the particle's
        candidate transform, converted to polar, mapped to sonar pixels, and
        the image intensity at each pixel is compared against Sonar_color
        under a Gaussian-style log-pdf.
        """
        # Gaussian log-pdf coefficients: log-likelihood = A + B * |D|^1.5
        A_sonar_color = -np.log(np.sqrt(2 * np.pi) * self.Xstd_sonar_color);
        B_sonar_color = - 0.5 / (self.Xstd_sonar_color**2);
        A_camera_color = -np.log(np.sqrt(2 * np.pi) * self.Xstd_camera_color);  # computed but not used below
        B_camera_color = - 0.5 / (self.Xstd_camera_color**2);                   # computed but not used below
        # Setup state and weight matrices (initialised to a large negative
        # log-weight; every column is overwritten in the loop)
        self.Weights = np.zeros((1,self.Npop_particles))
        self.Weights[...] = -10000000
        # Iterate each state
        for p in xrange(0,self.Npop_particles):
            CurrState = self.State[:,p]
            Weight = 0
            for data in self.dataSet:
                SonarImage = data["SonarImage"]
                CameraImage = data["CameraImage"]   # loaded but not used in the scoring below
                CameraState = data["CameraState"]   # 4xN homogeneous points of the target in the camera frame
                # Calculate sonar pixels: candidate sonar<-camera transform
                # built from this particle's (roll, pitch, yaw, tx, ty, tz)
                SonarTCamera = Transforms.getTransformMatrix(CurrState[3], CurrState[4], CurrState[5], CurrState[0], CurrState[1], CurrState[2])
                SonarState = SonarTCamera * CameraState
                SonarState = SonarState[0:3,:]
                # Cartesian -> polar (range R, bearing theta, elevation phi)
                R = (np.sqrt(np.power(SonarState[0,:],2)+np.power(SonarState[1,:],2)+np.power(SonarState[2,:],2)))
                SonarStatePolar = np.asarray([
                    R,
                    np.degrees(np.arctan2(SonarState[0,:],SonarState[2,:])),
                    np.degrees(np.arcsin(SonarState[1,:]/R))
                ])
                SonarStatePolar = SonarStatePolar.reshape(3,9)  # hard-coded 9 points (3x3 calibration grid)
                # Back to planar x/y for the sonar pixel-mapping function
                xs, ys = np.asarray([
                    SonarStatePolar[0,:]*np.sin(np.radians(SonarStatePolar[1,:])),
                    SonarStatePolar[0,:]*np.cos(np.radians(SonarStatePolar[1,:])),
                ])
                # Only project points inside the sonar's range and horizontal FOV
                docalcs = (SonarStatePolar[0,:] > Transforms.sonarMinRange) & (SonarStatePolar[0,:] < Transforms.sonarMaxRange) & (SonarStatePolar[1,:] < Transforms.sonarHOV)
                SonarPixels = np.asarray([
                    np.zeros(3*3),
                    np.zeros(3*3)
                ])
                for i, (x, y, docalc) in enumerate(zip(xs, ys, docalcs)):
                    if docalc:
                        SonarPixels[:,i] = sonar_xym_map(x,y)
                    else:
                        # NOTE(review): out-of-FOV points fall back to pixel
                        # (0,0), which still gets sampled below -- presumably
                        # acts as a penalty; confirm intent.
                        SonarPixels[:,i] = 0
                SonarPixels = SonarPixels.astype(int)
                # Accumulate per-pixel intensity log-likelihood
                for i, (us, vs) in enumerate(zip(SonarPixels[0,:], SonarPixels[1,:])):
                    D = self.Sonar_color - SonarImage[vs,us]
                    sonar_weight = A_sonar_color + B_sonar_color * abs(D)**1.5;
                    Weight = Weight + sonar_weight
            self.Weights[:,p] = Weight
            print p
    def resample_particles(self):
        """Importance resampling: draw a new population proportional to the
        normalised weights (log-weights shifted by the max for stability)."""
        L = np.exp(self.Weights - np.amax(self.Weights));
        Q = L / np.sum(L);
        R = np.cumsum(Q);
        T = np.random.rand(1, self.Npop_particles)
        # histogram is used here only to obtain R as bin edges; digitize then
        # maps each uniform draw to a particle index via the CDF
        a,b = np.histogram(T,R)
        I = np.digitize(T.tolist()[0],b)
        self.State = self.State[:, I];
    def compute_solution(self):
        """Report the current estimate: per-row mean and covariance of the
        particle population. Prints the mean (tx,ty,tz,roll,pitch,yaw)."""
        self.StateMean = np.mean(self.State, axis=1)
        self.StateCovar = np.cov(self.State)
        print self.StateMean
class Calibrate():
    """Sonar-camera extrinsic calibration node.

    Subscribes (via the project ROS wrapper) to synchronised camera and
    sonar images, detects a 3x3 circle-grid calibration target in the
    camera image, solves its camera-frame pose with solvePnP, and records
    (camera image, sonar image, camera-frame target points) triples.  Once
    50 frames are collected, a ParticleFilterCalibrate run estimates the
    sonar<-camera transform.
    """
    # -------------------------------
    # Constructor
    # -------------------------------
    def __init__(self):
        # ROS Data: wire callbacks and topic names into the project ROS wrapper
        self.ros = ROS(
            ros_callback=self.ros_callback,
            ui_callback=self.ui_callback,
            topics={
                "SonarImg": "/sonar_image",
                #"CameraImg": "/frontcam/camera/image_color",
                # "CameraImg": "/front_camera/camera/image_color/decompressed",
                "CameraImg": "/front_camera/camera/image_rect_color/decompressed",
                "Depth": "/depth",
                "IMUData": "/navigation/RPY",
                "IMUVelData": "/AHRS8/data",
                "DVLData": "/navigation/odom/relative",
                "SonarPub": "/vision/sonar_processed",
                "CameraPub": "/vision/camera_processed",
                "SaliencyMap": "/vision/saliency",
                "MapPub": "/map",
                "UISub": "/ui_callback",
                "MLPub": "/ml"
            },
            timing=1/8.0   # callback period: 8 Hz
        )
        # Algorithms (project-local helpers)
        self.cvAverage = CvAverage(resolution=Transforms.sonarResolution, maxSize=1)  # running-average denoiser
        self.lkSonar = LKOpticalFlow(sonar=True, points_wanted=2)
        self.lkCamera = LKOpticalFlow(sonar=False, points_wanted=5)
        self.featureExtractor = FeatureExtractor()
        # First Run
        self.firstRun = True
        self.cameraImageMouse = (0,0)    # last mouse click in the camera view (set by UI)
        self.sonarImageMouse = (0,0)     # last mouse click in the sonar view (set by UI)
        self.particleFilterTrack = False
        self.pipelineState = False
        # Store Data
        self.data = None
        self.cameraArray = None
        self.dataSet = []                # accumulated calibration frames (see ros_callback)
        self.pf = ParticleFilterCalibrate()
        # Special settings for LKCamera (override its defaults for this node)
        self.lkCamera.track_len = 3
        self.lkCamera.detect_interval = 3
        self.lkCamera.tracks = []
        self.lkCamera.frame_idx = 0
        self.lkCamera.lk_params = dict( winSize  = (15, 15),
                  maxLevel = 5,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        self.lkCamera.feature_params = dict( maxCorners = 500,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
    # -------------------------------
    # Start/Stop
    # -------------------------------
    def start(self):
        """Start the ROS pipeline (no-op if already running)."""
        if self.pipelineState:
            return
        self.ros.start()
    def stop(self):
        """Stop the ROS pipeline (no-op if not running)."""
        if not self.pipelineState:
            return
        self.ros.stop()
    # -------------------------------
    # Callbacks
    # -------------------------------
    def ui_callback(self, ui_data):
        """Handle UI commands delivered as a {key: value} dict.

        NOTE(review): the "ParticleFilterTrack"/"ParticleFilterTesting"
        branches reference self.particleFilterSLAM, which is never assigned
        in this class -- these would raise AttributeError if triggered.
        """
        #print ui_data
        for key in ui_data.keys():
            value = ui_data[key]
            if key == "GlobalSettings":
                Transforms.globalSettings = json.loads(value)
                pprint(Transforms.globalSettings)
            if key == "SaveData":
                self.save_data(self.sonarImageMouse, self.cameraImageMouse)
            if key == "WriteData":
                self.write_data(value)
            if key == "ParticleFilterTrack":
                self.firstRun = True
                self.particleFilterTrack = value
                self.particleFilterSLAM.clear_state()
            if key == "ParticleFilterTesting":
                self.particleFilterSLAM.testPrediction = value
            if key == "SonarImageMouse":
                self.sonarImageMouse = value
            if key == "CameraImageMouse":
                self.cameraImageMouse = value
    def draw(self, img, corners, imgpts):
        """Draw a 3-axis (BGR = x,y,z) pose gizmo from the first corner."""
        corner = tuple(corners[0].ravel())
        img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
        img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
        img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
        return img
    def ros_callback(self, data):
        """Per-frame pipeline: preprocess images, detect the circle grid,
        solve target pose, project it into the sonar frame, and collect
        calibration frames; runs the particle filter at 50 frames.

        data: dict from the ROS wrapper ("Elapsed", "CameraImg",
        "SonarImg", optionally "IMUData", ...); processed images are
        written back into it for publishing.
        """
        # FPS
        self.elapsed = data["Elapsed"]
        self.fpsRate = 1/self.elapsed
        # Set Robot to Flat Transform from IMU roll/pitch (yaw zeroed)
        if data.has_key("IMUData"):
            Transforms.FlatTRobot = Transforms.getTransformMatrix(data["IMUData"][0], data["IMUData"][1], 0, 0., 0., 0.)
            Transforms.RobotTFlat = inv(Transforms.FlatTRobot)
        else:
            pass
            #rospy.logwarn("Warning no IMU RPY Data")
        # -------------------------------
        # Image processing
        # -------------------------------
        # Check if image data available
        if not data.has_key("CameraImg") or not data.has_key("SonarImg"):
            return
        # Camera Processing
        cameraImage = Img.increase_contrast(img=data["CameraImg"], mult=1.0)
        # Power Law
        # NOTE(review): chained assignment also binds a stray local `img`;
        # presumably a leftover from a keyword-argument edit.
        sonarImage = img=data["SonarImg"]
        #sonarImage = Img.increase_contrast(sonarImage, mult=1.5)
        #sonarImage = Img.power_law(sonarImage, power=1.3)
        # Remove water noise through running average
        self.cvAverage.add_image(sonarImage)
        sonarImage = self.cvAverage.process_image()
        # Remove bottom part from sonar (blank the lowest 20 rows)
        sonarImage[Transforms.sonarResolution[1]-20:Transforms.sonarResolution[1], 0:Transforms.sonarResolution[0]] = 0
        # Make copy of images to write on
        sonarProcessed = sonarImage.copy()
        cameraProcessed = cameraImage.copy()
        cv2.putText(sonarProcessed, str(round(self.fpsRate,2)), (20,30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,0))
        # Convert to correct color types
        sonarImage = Img.color_to_gray(sonarImage)
        # Extract features from sonar and camera
        sonarArray = self.featureExtractor.extract_features_sonar(sonarImage, sonarProcessed)
        # -------------------------------
        # Calibrate
        # -------------------------------
        #log(1,"All")
        # Mark the last UI mouse positions on both views
        cv2.circle(cameraProcessed,(self.cameraImageMouse[0],self.cameraImageMouse[1]), 4, (0,0,255), -1)
        cv2.circle(sonarProcessed,(self.sonarImageMouse[0],self.sonarImageMouse[1]), 4, (0,0,255), -1)
        # Threshold the gamma-boosted grayscale to isolate bright blobs
        calibGray = cv2.cvtColor(cameraImage, cv2.COLOR_RGB2GRAY)
        calibGray = Img.power_law(img=calibGray, power=2.0)
        calibGray = calibGray.astype(np.uint8)
        ret,maskO = cv2.threshold(calibGray,128,255,cv2.THRESH_BINARY)
        mask = cv2.dilate(maskO,None,iterations = 1)
        mask = cv2.erode(mask,None,iterations = 3)
        #data["CameraProcessed"] = cameraProcessed
        #return
        _, ctr,hierachy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        frameArray = []
        # Filter contours down to plausible circle-grid blobs
        for cnt,hier in zip(ctr,hierachy[0]):
            cv2.drawContours(cameraProcessed,[cnt],0,(0,0,255),1)
            # Check area
            area = cv2.contourArea(cnt)
            perimeter = cv2.arcLength(cnt,True)
            if area < 10 or area > 500:
                continue
            # Rect Fit: estimate minor/major axis from the min-area box and
            # reject elongated blobs (ratio > 2)
            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            boxArr = []
            try:
                for i in range(1,4):
                    boxArr.append({
                        "Cost": Math.l2_norm_2d(box[0], box[i]),
                    })
                boxArr.sort(Math.sort("Cost"))
                ma = boxArr[0]["Cost"]
                MA = boxArr[1]["Cost"]
                ratio = MA/ma
            except:
                # degenerate box / zero-length edge: treat as non-elongated
                ma = 0
                MA = 0
                ratio = 0
            if ratio > 2.0:
                continue
            # Bounding Rect
            x,y,w,h = cv2.boundingRect(cnt)
            cx,cy = x+w/2, y+h/2
            #Img.draw_rect(cameraProcessed, [x,y,w,h], color=(255,255,0))
            # Centroid from image moments
            M = cv2.moments(cnt)
            if M['m00'] == 0:
                continue
            centroid_x = int(M['m10']/M['m00'])
            centroid_y = int(M['m01']/M['m00'])
            #if Math.l2_norm_2d(self.cameraImageMouse, (centroid_x, centroid_y)) > 200.0:
            #    continue
            # Ignore blobs in the lower part of the frame
            if centroid_y > 400:
                continue
            cv2.circle(cameraProcessed,(centroid_x,centroid_y), 2, (0,0,255), -1)
            cv2.putText(cameraProcessed, str(perimeter) , (centroid_x+10,centroid_y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0,0,255))
            item={
                "Class": str(len(frameArray)),
                "Contour": cnt,
                "Centroid": (centroid_x, centroid_y),
                "Area": area,
                "Rect": [x,y,w,h],
                "TrackValid": 0,
                "MA": MA,
                "ma": ma,
                "Ratio": ratio,
            }
            frameArray.append(item)
        # Build an inverted blob image for findCirclesGrid (dark dots on white)
        searchImg = np.zeros([480,640,1],dtype=np.uint8)
        cameraTracks, cameraVelocities = self.lkCamera.process(maskO, cameraProcessed, self.elapsed, frameArray)
        finalArray = []
        for item in frameArray:
            Img.draw_rect(cameraProcessed, item["Rect"], color=(255,0,0))
            cv2.circle(searchImg,item["Centroid"], 10, 255, -1)
            finalArray.append(item)
        searchImg = (255-searchImg)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # NOTE(review): objp/axis are defined twice; only the second objp
        # (w*h x 1 x 3, scaled below) is actually used.
        objp = np.zeros((3*3,3), np.float32)
        objp[:,:2] = np.mgrid[0:3,0:3].T.reshape(-1,2)
        axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
        w = 3
        h = 3
        objp = np.zeros((w*h, 1, 3), np.double)
        objp[:,:,:2] = np.mgrid[0:w, 0:h].T.reshape(-1,1,2)
        scale = 0.762   # grid spacing; presumably metres -- TODO confirm
        for x in range(0, w):
            for y in range(0, h):
                idx = x*w + y
                objp[idx] = [y*scale,x*scale,0]
        [ret,corners] = cv2.findCirclesGrid(searchImg, (3,3), flags=cv2.CALIB_CB_SYMMETRIC_GRID+cv2.CALIB_CB_CLUSTERING)
        if ret:
            #for p in corners:
            #    cv2.circle(cameraProcessed,(p[0][0],p[0][1]),5,(255,0,0),-1)
            #corners2 = cv2.cornerSubPix(searchImg,corners,(11,11),(-1,-1),criteria)
            corners2 = corners
            #for p in corners2:
            #    cv2.circle(cameraProcessed,(p[0][0],p[0][1]),5,(0,255,0),-1)
            cameraProcessed = cv2.drawChessboardCorners(cameraProcessed, (3, 3), corners2, ret)
            # Solve the grid's pose in the camera frame (zero distortion,
            # intrinsics taken from the project projection matrix F)
            F = Transforms.F[0:3,0:3]
            D = np.array([0., 0., 0., 0., 0.])
            retval, rvecs, tvecs = cv2.solvePnP(objp, corners2, F, D)
            rvecs = cv2.Rodrigues(rvecs)[0]
            CameraTWorld = np.matrix([ [ rvecs[0,0], rvecs[0,1], rvecs[0,2], tvecs[0] ],
                                       [ rvecs[1,0], rvecs[1,1], rvecs[1,2], tvecs[1] ],
                                       [ rvecs[2,0], rvecs[2,1], rvecs[2,2], tvecs[2] ],
                                       [ 0., 0., 0., 1. ]])
            # Homogeneous world-frame grid points (z = 0 plane)
            WorldState = np.asarray([
                np.zeros(w*h),
                np.zeros(w*h),
                np.zeros(w*h),
                np.ones(w*h),
            ])
            for x in range(0, w):
                for y in range(0, h):
                    idx = x*w + y
                    WorldState[0,idx] = objp[idx][0][0]
                    WorldState[1,idx] = objp[idx][0][1]
            CameraState = CameraTWorld * WorldState
            # Re-project into the camera image as a sanity check (green dots)
            FinalMatrix = np.asarray(Transforms.F * CameraState)
            CameraPixels = np.asarray([
                (FinalMatrix[0,:]/FinalMatrix[2,:]),
                (FinalMatrix[1,:]/FinalMatrix[2,:]),
            ])
            CameraPixels[np.isnan(CameraPixels)] = 0.
            CameraPixels = CameraPixels.astype(int)
            for i, (x0, x1) in enumerate(zip(CameraPixels[0,:], CameraPixels[1,:])):
                cv2.circle(cameraProcessed,(x0,x1), 8, (0,255,0), -1)
            # Project into the sonar view using the CURRENT extrinsic estimate
            # (same cartesian->polar->pixel chain as the particle filter)
            SonarState = Transforms.SonarTCamera * CameraState
            SonarState = SonarState[0:3,:]
            R = (np.sqrt(np.power(SonarState[0,:],2)+np.power(SonarState[1,:],2)+np.power(SonarState[2,:],2)))
            SonarStatePolar = np.asarray([
                R,
                np.degrees(np.arctan2(SonarState[0,:],SonarState[2,:])),
                np.degrees(np.arcsin(SonarState[1,:]/R))
            ])
            SonarStatePolar = SonarStatePolar.reshape(3,9)
            xs, ys = np.asarray([
                SonarStatePolar[0,:]*np.sin(np.radians(SonarStatePolar[1,:])),
                SonarStatePolar[0,:]*np.cos(np.radians(SonarStatePolar[1,:])),
            ])
            docalcs = (SonarStatePolar[0,:] > Transforms.sonarMinRange) & (SonarStatePolar[0,:] < Transforms.sonarMaxRange) & (SonarStatePolar[1,:] < Transforms.sonarHOV)
            SonarPixels = np.asarray([
                np.zeros(w*h),
                np.zeros(w*h)
            ])
            for i, (x, y, docalc) in enumerate(zip(xs, ys, docalcs)):
                if docalc:
                    SonarPixels[:,i] = sonar_xym_map(x,y)
                else:
                    SonarPixels[:,i] = 0
            SonarPixels = SonarPixels.astype(int)
            for i, (x0, x1) in enumerate(zip(SonarPixels[0,:], SonarPixels[1,:])):
                cv2.circle(sonarProcessed,(x0,x1), 5, (0,255,0), -1)
            # Record this frame for the particle-filter calibration
            item={
                "CameraImage": calibGray,
                "SonarImage": sonarImage,
                "CameraState": CameraState,
            }
            self.dataSet.append(item)
            print len(self.dataSet)
            # data["CameraProcessed"] = cameraProcessed.copy()
            # data["SonarProcessed"] = sonarProcessed.copy()
            # Once 50 frames collected, run 10 filter iterations and reset
            if(len(self.dataSet) == 50):
                print "Calibrate"
                self.pf.dataSet = self.dataSet
                for i in xrange(0,10):
                    self.pf.update_particles()
                    self.pf.calc_log_likelihood()
                    self.pf.resample_particles()
                    self.pf.compute_solution()
                    print i
                del self.dataSet[:]
        # Publish the annotated views every frame
        # (placed at method level -- indentation lost in transcription;
        #  presumably outside `if ret:` so views publish even without a
        #  grid detection -- TODO confirm)
        data["CameraProcessed"] = cameraProcessed.copy()
        data["SonarProcessed"] = sonarProcessed.copy()
    # NOTE(review): a large region of commented-out helper methods removed
    # here as dead code (cartesian_to_polar, polar_to_cartesian,
    # compute_sonarpixels, compute_camerapixels, compute_velocitypixels,
    # compute_solution, draw_particles). Recover from version control if
    # they are ever needed again.
if __name__ == '__main__':
    # Entry point: register this process as the 'Calibrate' ROS node,
    # build the calibration pipeline, start it, and block until shutdown.
    rospy.init_node('Calibrate')
    calibrate = Calibrate()
    calibrate.start()
    rospy.spin()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment