import cv2
import numpy as np
import math

PI = 3.1415926

frameWidth = 640
frameHeight = 480


def update_perspective(val):
    # Trackbar positions: 90 is the neutral position, so subtract 90 and convert to radians
    alpha = (cv2.getTrackbarPos("Alpha", "Result") - 90) * PI / 180
    beta = (cv2.getTrackbarPos("Beta", "Result") - 90) * PI / 180
    gamma = (cv2.getTrackbarPos("Gamma", "Result") - 90) * PI / 180
    focalLength = cv2.getTrackbarPos("f", "Result")
    dist = cv2.getTrackbarPos("Distance", "Result")

    image_size = (frameWidth, frameHeight)
    w, h = image_size

    # Projection matrix 2D -> 3D (moves the origin to the image centre)
    A1 = np.array([[1, 0, -w / 2],
                   [0, 1, -h / 2],
                   [0, 0, 0],
                   [0, 0, 1]], dtype=np.float32)

    # Rotation matrices around the x, y and z axes
    RX = np.array([[1, 0, 0, 0],
                   [0, math.cos(alpha), -math.sin(alpha), 0],
                   [0, math.sin(alpha), math.cos(alpha), 0],
                   [0, 0, 0, 1]], dtype=np.float32)
    RY = np.array([[math.cos(beta), 0, -math.sin(beta), 0],
                   [0, 1, 0, 0],
                   [math.sin(beta), 0, math.cos(beta), 0],
                   [0, 0, 0, 1]], dtype=np.float32)
    RZ = np.array([[math.cos(gamma), -math.sin(gamma), 0, 0],
                   [math.sin(gamma), math.cos(gamma), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]], dtype=np.float32)
    R = np.dot(np.dot(RX, RY), RZ)

    # Translation matrix along the z axis
    T = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, dist],
                  [0, 0, 0, 1]], dtype=np.float32)

    # K - intrinsic (camera) matrix, 3D -> 2D
    K = np.array([[focalLength, 0, w / 2, 0],
                  [0, focalLength, h / 2, 0],
                  [0, 0, 1, 0]], dtype=np.float32)

    transformationMat = np.dot(np.dot(np.dot(K, T), R), A1)

    destination = cv2.warpPerspective(source, transformationMat, image_size,
                                      flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)
    cv2.imshow("Result", destination)


source = cv2.imread('frame.jpg')  # Replace with your image file path
if source is None:
    raise IOError("Could not read the input image")
# Match the fixed 640x480 working size used in the transformation matrices
source = cv2.resize(source, (frameWidth, frameHeight))

cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.createTrackbar("Alpha", "Result", 90, 180, update_perspective)
cv2.createTrackbar("Beta", "Result", 90, 180, update_perspective)
cv2.createTrackbar("Gamma", "Result", 90, 180, update_perspective)
cv2.createTrackbar("f", "Result", 500, 2000, update_perspective)
cv2.createTrackbar("Distance", "Result", 500, 2000, update_perspective)

update_perspective(0)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np
import math

PI = 3.1415926

frameWidth = 640
frameHeight = 480


def update_perspective(val):
    # Trackbar positions: 90 is the neutral position, so subtract 90 and convert to radians
    alpha = (cv2.getTrackbarPos("Alpha", "Result") - 90) * PI / 180
    beta = (cv2.getTrackbarPos("Beta", "Result") - 90) * PI / 180
    gamma = (cv2.getTrackbarPos("Gamma", "Result") - 90) * PI / 180
    focalLength = cv2.getTrackbarPos("f", "Result")
    dist = cv2.getTrackbarPos("Distance", "Result")

    image_size = (frameWidth, frameHeight)
    w, h = image_size

    # Projection matrix 2D -> 3D (moves the origin to the image centre)
    A1 = np.array([[1, 0, -w / 2],
                   [0, 1, -h / 2],
                   [0, 0, 0],
                   [0, 0, 1]], dtype=np.float32)

    # Rotation matrices around the x, y and z axes
    RX = np.array([[1, 0, 0, 0],
                   [0, math.cos(alpha), -math.sin(alpha), 0],
                   [0, math.sin(alpha), math.cos(alpha), 0],
                   [0, 0, 0, 1]], dtype=np.float32)
    RY = np.array([[math.cos(beta), 0, -math.sin(beta), 0],
                   [0, 1, 0, 0],
                   [math.sin(beta), 0, math.cos(beta), 0],
                   [0, 0, 0, 1]], dtype=np.float32)
    RZ = np.array([[math.cos(gamma), -math.sin(gamma), 0, 0],
                   [math.sin(gamma), math.cos(gamma), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]], dtype=np.float32)
    R = np.dot(np.dot(RX, RY), RZ)

    # Translation matrix along the z axis
    T = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, dist],
                  [0, 0, 0, 1]], dtype=np.float32)

    # K - intrinsic (camera) matrix, 3D -> 2D
    K = np.array([[focalLength, 0, w / 2, 0],
                  [0, focalLength, h / 2, 0],
                  [0, 0, 1, 0]], dtype=np.float32)

    transformationMat = np.dot(np.dot(np.dot(K, T), R), A1)

    ret, frame = capture.read()
    if not ret:
        return
    # Match the fixed 640x480 working size used in the transformation matrices
    frame = cv2.resize(frame, (frameWidth, frameHeight))
    destination = cv2.warpPerspective(frame, transformationMat, image_size,
                                      flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)
    cv2.imshow("Result", destination)


filename = 'video.mp4'  # Replace with your video file path
capture = cv2.VideoCapture(filename)
if not capture.isOpened():
    raise IOError("Error reading video")

cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.createTrackbar("Alpha", "Result", 90, 180, update_perspective)
cv2.createTrackbar("Beta", "Result", 90, 180, update_perspective)
cv2.createTrackbar("Gamma", "Result", 90, 180, update_perspective)
cv2.createTrackbar("f", "Result", 500, 2000, update_perspective)
cv2.createTrackbar("Distance", "Result", 500, 2000, update_perspective)

while True:
    update_perspective(0)
    if cv2.waitKey(1) & 0xFF == 27:  # Press 'Esc' to exit
        break

capture.release()
cv2.destroyAllWindows()
// OpenCV imports
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

// C++ imports
#include <iostream>
#include <cmath>

// namespaces
using namespace std;
using namespace cv;

#define PI 3.1415926

int frameWidth = 640;
int frameHeight = 480;

/*
 * This code illustrates bird's eye view perspective transformation using OpenCV
 * Paper: Distance Determination for an Automobile Environment using Inverse Perspective Mapping in OpenCV
 * Link to paper: https://www.researchgate.net/publication/224195999_Distance_determination_for_an_automobile_environment_using_Inverse_Perspective_Mapping_in_OpenCV
 * Code taken from: http://www.aizac.info/birds-eye-view-homography-using-opencv/
 */
int main(int argc, char const *argv[]) {

    if (argc < 2) {
        cerr << "Usage: " << argv[0] << " /path/to/video/" << endl;
        cout << "Exiting...." << endl;
        return -1;
    }

    // get file name from the command line
    string filename = argv[1];

    // capture object
    VideoCapture capture(filename);

    // mat containers to receive images
    Mat source, destination;

    // check if capture was successful
    if (!capture.isOpened()) throw "Error reading video";

    int alpha_ = 90, beta_ = 90, gamma_ = 90;
    int f_ = 500, dist_ = 500;

    namedWindow("Result", 1);

    createTrackbar("Alpha", "Result", &alpha_, 180);
    createTrackbar("Beta", "Result", &beta_, 180);
    createTrackbar("Gamma", "Result", &gamma_, 180);
    createTrackbar("f", "Result", &f_, 2000);
    createTrackbar("Distance", "Result", &dist_, 2000);

    while (true) {

        capture >> source;
        if (source.empty()) break;  // stop when the video ends
        resize(source, source, Size(frameWidth, frameHeight));

        double focalLength, dist, alpha, beta, gamma;

        // Trackbar positions: 90 is the neutral position, so subtract 90 and convert to radians
        alpha = ((double)alpha_ - 90) * PI / 180;
        beta = ((double)beta_ - 90) * PI / 180;
        gamma = ((double)gamma_ - 90) * PI / 180;
        focalLength = (double)f_;
        dist = (double)dist_;

        Size image_size = source.size();
        double w = (double)image_size.width, h = (double)image_size.height;

        // Projection matrix 2D -> 3D
        Mat A1 = (Mat_<float>(4, 3) <<
            1, 0, -w / 2,
            0, 1, -h / 2,
            0, 0, 0,
            0, 0, 1);

        // Rotation matrices Rx, Ry, Rz
        Mat RX = (Mat_<float>(4, 4) <<
            1, 0, 0, 0,
            0, cos(alpha), -sin(alpha), 0,
            0, sin(alpha), cos(alpha), 0,
            0, 0, 0, 1);

        Mat RY = (Mat_<float>(4, 4) <<
            cos(beta), 0, -sin(beta), 0,
            0, 1, 0, 0,
            sin(beta), 0, cos(beta), 0,
            0, 0, 0, 1);

        Mat RZ = (Mat_<float>(4, 4) <<
            cos(gamma), -sin(gamma), 0, 0,
            sin(gamma), cos(gamma), 0, 0,
            0, 0, 1, 0,
            0, 0, 0, 1);

        // R - rotation matrix
        Mat R = RX * RY * RZ;

        // T - translation matrix
        Mat T = (Mat_<float>(4, 4) <<
            1, 0, 0, 0,
            0, 1, 0, 0,
            0, 0, 1, dist,
            0, 0, 0, 1);

        // K - intrinsic matrix
        Mat K = (Mat_<float>(3, 4) <<
            focalLength, 0, w / 2, 0,
            0, focalLength, h / 2, 0,
            0, 0, 1, 0);

        Mat transformationMat = K * (T * (R * A1));

        warpPerspective(source, destination, transformationMat, image_size, INTER_CUBIC | WARP_INVERSE_MAP);

        imshow("Result", destination);
        waitKey(100);
    }

    return 0;
}
Hi, what is the purpose of your matrix A1? You mention it transforms 2D into 3D, right? How? And why do you take the negative of half the width and height of the image? Isn't matrix A1 a linear transformation that implies a translation of the image?
hi, did you find answers?
Hi, No I didn't.
I think you should take a look at this. I am still working on it, but this worked for me.
https://stackoverflow.com/questions/48576087/birds-eye-view-perspective-transformation-from-camera-calibration-opencv-python
https://docs.opencv.org/3.4.0/d9/dab/tutorial_homography.html#tutorial_homography_Demo3
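For anyone still wondering about A1: as far as I can tell from the code above, A1 just lifts a homogeneous pixel (u, v, 1) onto the z = 0 plane as (u - w/2, v - h/2, 0, 1). In other words, it moves the origin to the image centre so that RX, RY and RZ rotate the image plane about its centre instead of about the top-left corner, and the intrinsic matrix K adds w/2 and h/2 back at the end. So yes, it is effectively a translation, which can only be written as a single matrix because the coordinates are homogeneous. A minimal sketch of that (my own illustration, not part of the gist):

import numpy as np

w, h = 640, 480
# Same A1 as in the scripts above: embed the image in 3D, centred on the image centre
A1 = np.array([[1, 0, -w / 2],
               [0, 1, -h / 2],
               [0, 0, 0],
               [0, 0, 1]], dtype=np.float32)

pixel = np.array([100, 50, 1], dtype=np.float32)  # homogeneous image point (u, v, 1)
point_3d = A1 @ pixel                             # lift to the z = 0 plane
print(point_3d)                                   # [-220. -190.    0.    1.] == (u - w/2, v - h/2, 0, 1)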