I am working on point triangulation with OpenCV and I have run into a problem. Here is what I do:
STEP 1.
I generate seven 3D points with my generate3DPoints() function.
STEP 2.
Then I project them onto two images with OpenCV's projectPoints() function and store the 2D points in the vectors leftImagePoints and rightImagePoints, respectively.
Image point: [0.5, 0.5, -0.5] Projected to [736.754, 618.17]
Image point: [0.5, 0.5, 0.5] Projected to [731.375, 611.951]
Image point: [-0.5, 0.5, 0.5] Projected to [688.719, 612.961]
Image point: [-0.5, 0.5, -0.5] Projected to [692.913, 619.172]
Image point: [0.5, -0.5, -0.5] Projected to [737.767, 573.217]
Image point: [-0.5, -0.5, -0.5] Projected to [693.936, 574.331]
Image point: [-0.5, -0.5, 0.5] Projected to [689.71, 569.285]
------------------
Image point: [0.5, 0.5, -0.5] Projected to [702.397, -121.563]
Image point: [0.5, 0.5, 0.5] Projected to [696.125, -93.1121]
Image point: [-0.5, 0.5, 0.5] Projected to [632.271, -90.1316]
Image point: [-0.5, 0.5, -0.5] Projected to [634.829, -116.987]
Image point: [0.5, -0.5, -0.5] Projected to [715.505, -230.592]
Image point: [-0.5, -0.5, -0.5] Projected to [642.35, -219.8]
Image point: [-0.5, -0.5, 0.5] Projected to [638.094, -180.103]
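(For reference, what projectPoints computes before lens distortion is the pinhole model s * [u, v, 1]^T = K * (R * X + t). A minimal hand-rolled sketch of just that pinhole part, assuming OpenCV's Mat types and using namespace cv; since my distCoeffs are non-zero, its values will differ from the distorted output above:)

Point2d projectPinhole(const Point3d& X, const Mat& K, const Mat& R, const Mat& t)
{
    Mat Xw = (Mat_<double>(3, 1) << X.x, X.y, X.z);
    Mat Xc = R * Xw + t;                            // camera-frame coordinates
    double x = Xc.at<double>(0) / Xc.at<double>(2); // normalized image coordinates
    double y = Xc.at<double>(1) / Xc.at<double>(2);
    double u = K.at<double>(0, 0) * x + K.at<double>(0, 2); // pixel coordinates
    double v = K.at<double>(1, 1) * y + K.at<double>(1, 2);
    return Point2d(u, v);
}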
STEP 3.
After that I use OpenCV's triangulatePoints function to get homogeneous coordinates and convert them to ordinary 3D coordinates.
Reconstruction result:
10.43599, 7.2594047, -33.088718;
11.009606, 7.6683388, -33.098804;
10.033145, 7.6832604, -33.375408;
9.5006475, 7.2904119, -33.379032;
9.5954504, 5.7358074, -32.76096;
8.7637157, 5.8084483, -33.068729;
9.3709002, 6.2525721, -33.122173
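(As a side note, the manual per-column division in my triangulateInOpenCV could also be done with OpenCV's convertPointsFromHomogeneous, which expects points as rows, hence the transpose; a minimal sketch:)

Mat pts4D; // 4xN homogeneous output of triangulatePoints
triangulatePoints(leftPMat, rightPMat, leftImagePoints, rightImagePoints, pts4D);
Mat pts3D; // Nx1, 3-channel Cartesian points
convertPointsFromHomogeneous(pts4D.t(), pts3D);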
Now you can see the issue: the original 3D points that I generated myself differ from the result after projection and reconstruction. I cannot find the problem and hope you can help me.
Thanks!
Here is my code (OpenCV 2.4.9):
// testVirtualData.cpp :
//
#include "stdafx.h"
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;

vector<Point3f> generate3DPoints()
{
vector<Point3f> pointsXYZ; // save 7 points
double x, y, z;
x = 0.5; y = 0.5; z = -0.5;
pointsXYZ.push_back(Point3f(x, y, z));
x = 0.5; y = 0.5; z = 0.5;
pointsXYZ.push_back(Point3f(x, y, z));
x = -0.5; y = 0.5; z = 0.5;
pointsXYZ.push_back(Point3f(x, y, z));
x = -0.5; y = 0.5; z = -0.5;
pointsXYZ.push_back(Point3f(x, y, z));
x = 0.5; y = -0.5; z = -0.5;
pointsXYZ.push_back(Point3f(x, y, z));
x = -0.5; y = -0.5; z = -0.5;
pointsXYZ.push_back(Point3f(x, y, z));
x = -0.5; y = -0.5; z = 0.5;
pointsXYZ.push_back(Point3f(x, y, z));
for (int i = 0; i < pointsXYZ.size(); i++)
{
cout << pointsXYZ[i] << endl;
}
return pointsXYZ;
}

vector<Point3f> triangulateInOpenCV(Matx34d leftPMat, Matx34d rightPMat, vector<Point2f> leftPtsxy, vector<Point2f> rightPtsxy)
{
Mat corrPtsXYZHomo(4, leftPtsxy.size(), CV_64FC1);
triangulatePoints(leftPMat, rightPMat, leftPtsxy, rightPtsxy, corrPtsXYZHomo);
cout << "reconsturction result 3D points in homo-coordinate" << endl;
cout << corrPtsXYZHomo << endl;
vector<Point3f> corrPtsXYZ;
for (int i = 0; i < corrPtsXYZHomo.cols; i++)
{
float x = corrPtsXYZHomo.at<float>(0, i) / corrPtsXYZHomo.at<float>(3, i);
float y = corrPtsXYZHomo.at<float>(1, i) / corrPtsXYZHomo.at<float>(3, i);
float z = corrPtsXYZHomo.at<float>(2, i) / corrPtsXYZHomo.at<float>(3, i);
corrPtsXYZ.push_back(Point3f(x, y, z));
int t = 1;
}
return corrPtsXYZ;
}

int _tmain(int argc, _TCHAR* argv[])
{
vector<Point3f> objectPoints = generate3DPoints(); //generate by myself
vector<Point2f> rightImagePoints; // save projection result
vector<Point2f> leftImagePoints;  // save projection result

// 1. intrinsic matrix
Mat intrisicMat(3, 3, DataType<double>::type);
intrisicMat.at<double>(0, 0) = 1.6415318549788924e+003;
intrisicMat.at<double>(1, 0) = 0;
intrisicMat.at<double>(2, 0) = 0;
intrisicMat.at<double>(0, 1) = 0;
intrisicMat.at<double>(1, 1) = 1.7067753507885654e+003;
intrisicMat.at<double>(2, 1) = 0;
intrisicMat.at<double>(0, 2) = 5.3262822453148601e+002;
intrisicMat.at<double>(1, 2) = 3.8095355839052968e+002;
intrisicMat.at<double>(2, 2) = 1;

// 2., 3. R and T
// left
double leftRMatArray[] =
{
1, 0, 0,
0, 1, 0,
0, 0, 1
};
Mat leftRMat = Mat(3, 3, CV_64FC1, leftRMatArray); //Rotation Matrix
Mat leftRVec(3, 1, DataType<double>::type); // Rotation vector
Rodrigues(leftRMat, leftRVec);
Mat leftTVec(3, 1, DataType<double>::type); // Translation vector
leftTVec.at<double>(0) = 4.1158489381208221e+000;
leftTVec.at<double>(1) = 4.6847683212704716e+000;
leftTVec.at<double>(2) = 3.6169795190294256e+001;
//leftTVec.at<double>(0) = 0;
//leftTVec.at<double>(1) = 0;
//leftTVec.at<double>(2) = 0;
// right
Mat rightRVec(3, 1, DataType<double>::type); // Rotation vector
rightRVec.at<double>(0) = -3.9277902400761393e-002;
rightRVec.at<double>(1) = 3.7803824407602084e-002;
rightRVec.at<double>(2) = 2.6445674487856268e-002;

Mat rightRMat; // Rotation matrix
Rodrigues(rightRVec, rightRMat);
Mat rightTVec(3, 1, DataType<double>::type); // Translation vector
rightTVec.at<double>(0) = 2.1158489381208221e+000;
rightTVec.at<double>(1) = -7.6847683212704716e+000;
rightTVec.at<double>(2) = 2.6169795190294256e+001;

// 4. distortion
Mat distCoeffs(5, 1, DataType<double>::type); // Distortion vector
distCoeffs.at<double>(0) = -7.9134632415085826e-001;
distCoeffs.at<double>(1) = 1.5623584435644169e+000;
distCoeffs.at<double>(2) = -3.3916502741726508e-002;
distCoeffs.at<double>(3) = -1.3921577146136694e-002;
distCoeffs.at<double>(4) = 1.1430734623697941e+002;
cout << "Intrisic matrix: " << intrisicMat << endl << endl;
cout << "Distortion coef: " << distCoeffs << endl << endl;
cout << "left Rotation vector: " << leftRVec << endl << endl;
cout << "left Translation vector: " << leftTVec << endl << endl;
cout << "right Rotation vector: " << rightRVec << endl << endl;
cout << "right Translation vector: " << rightTVec << endl << endl;
// project
// left
projectPoints(objectPoints, leftRVec, leftTVec, intrisicMat, distCoeffs, leftImagePoints);
// right
projectPoints(objectPoints, rightRVec, rightTVec, intrisicMat, distCoeffs, rightImagePoints);
for (int i = 0; i < leftImagePoints.size(); ++i)
{
cout << "Image point: " << objectPoints[i] << " Projected to " << leftImagePoints[i] << endl;
}
cout << "------------------" << endl;
for (int i = 0; i < rightImagePoints.size(); ++i)
{
cout << "Image point: " << objectPoints[i] << " Projected to " << rightImagePoints[i] << endl;
}
//triangulate
double leftPArray[] =
{
leftRMat.at<double>(0, 0), leftRMat.at<double>(0, 1), leftRMat.at<double>(0, 2), leftTVec.at<double>(0),
leftRMat.at<double>(1, 0), leftRMat.at<double>(1, 1), leftRMat.at<double>(1, 2), leftTVec.at<double>(1),
leftRMat.at<double>(2, 0), leftRMat.at<double>(2, 1), leftRMat.at<double>(2, 2), leftTVec.at<double>(2)
};
Mat leftPMat = Mat(3, 4, CV_64FC1, leftPArray); // left P Matrix
double rightPArray[] =
{
rightRMat.at<double>(0, 0), rightRMat.at<double>(0, 1), rightRMat.at<double>(0, 2), rightTVec.at<double>(0),
rightRMat.at<double>(1, 0), rightRMat.at<double>(1, 1), rightRMat.at<double>(1, 2), rightTVec.at<double>(1),
rightRMat.at<double>(2, 0), rightRMat.at<double>(2, 1), rightRMat.at<double>(2, 2), rightTVec.at<double>(2)
};
Mat rightPMat = Mat(3, 4, CV_64FC1, rightPArray); // right P Matrix
vector<Point3f> triangulationResult = triangulateInOpenCV(leftPMat, rightPMat, leftImagePoints, rightImagePoints);
cout << "reconstruction result" << endl;
cout << triangulationResult << endl;
return 0;
}
Your projection matrix computation is not quite right: you build P as the bare [R | t] without the camera intrinsics. Let's ignore lens distortion to make life easier. If A1 and A2 are the intrinsic matrices of the two cameras, then P1 = A1 * [R1 | t1] and P2 = A2 * [R2 | t2] are the projection matrices for the left and right cameras. You may also need to change your code slightly so that it uses only double (or only float) values; you create corrPtsXYZHomo as CV_64FC1 but read it with at<float>. With these changes I got
[ 0.5000000000000006, 0.4999999999999996, -0.4999999999999953;
0.4999999999999991, 0.5000000000000002, 0.5000000000000033;
-0.5000000000000008, 0.5000000000000003, 0.5000000000000016;
-0.4999999999999995, 0.4999999999999996, -0.4999999999999952;
0.5000000000000002, -0.4999999999999998, -0.4999999999999991;
-0.4999999999999993, -0.4999999999999998, -0.5000000000000001;
-0.5000000000000012, -0.5000000000000003, 0.4999999999999947]
which is close to the input points. And here is the code (a more compact way to build the projection matrices is sketched after it):
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;

vector<Point3d> generate3DPoints()
{
vector<Point3d> pointsXYZ; // save 7 points
double x, y, z;
x = 0.5; y = 0.5; z = -0.5;
pointsXYZ.push_back(Point3d(x, y, z));
x = 0.5; y = 0.5; z = 0.5;
pointsXYZ.push_back(Point3d(x, y, z));
x = -0.5; y = 0.5; z = 0.5;
pointsXYZ.push_back(Point3d(x, y, z));
x = -0.5; y = 0.5; z = -0.5;
pointsXYZ.push_back(Point3d(x, y, z));
x = 0.5; y = -0.5; z = -0.5;
pointsXYZ.push_back(Point3d(x, y, z));
x = -0.5; y = -0.5; z = -0.5;
pointsXYZ.push_back(Point3d(x, y, z));
x = -0.5; y = -0.5; z = 0.5;
pointsXYZ.push_back(Point3d(x, y, z));
for (int i = 0; i < pointsXYZ.size(); i++)
{
cout << pointsXYZ[i] << endl;
}
return pointsXYZ;
}

vector<Point3d> triangulateInOpenCV(Matx34d leftPMat, Matx34d rightPMat, vector<Point2d> leftPtsxy, vector<Point2d> rightPtsxy)
{
Mat corrPtsXYZHomo(4, leftPtsxy.size(), CV_64FC1);
triangulatePoints(leftPMat, rightPMat, leftPtsxy, rightPtsxy, corrPtsXYZHomo);
cout << "reconsturction result 3D points in homo-coordinate" << endl;
cout << corrPtsXYZHomo << endl;
vector<Point3d> corrPtsXYZ;
for (int i = 0; i < corrPtsXYZHomo.cols; i++)
{
double x = corrPtsXYZHomo.at<double>(0, i) / corrPtsXYZHomo.at<double>(3, i);
double y = corrPtsXYZHomo.at<double>(1, i) / corrPtsXYZHomo.at<double>(3, i);
double z = corrPtsXYZHomo.at<double>(2, i) / corrPtsXYZHomo.at<double>(3, i);
corrPtsXYZ.push_back(Point3d(x, y, z));
int t = 1;
}
return corrPtsXYZ;
}

int main(int argc, char* argv[])
{
vector<Point3d> objectPoints = generate3DPoints(); //generate by myself
vector<Point2d> rightImagePoints; // save projection result
vector<Point2d> leftImagePoints;  // save projection result

// 1. intrinsic matrix
Mat intrisicMat(3, 3, DataType<double>::type);
intrisicMat.at<double>(0, 0) = 1.6415318549788924e+003;
intrisicMat.at<double>(1, 0) = 0;
intrisicMat.at<double>(2, 0) = 0;
intrisicMat.at<double>(0, 1) = 0;
intrisicMat.at<double>(1, 1) = 1.7067753507885654e+003;
intrisicMat.at<double>(2, 1) = 0;
intrisicMat.at<double>(0, 2) = 5.3262822453148601e+002;
intrisicMat.at<double>(1, 2) = 3.8095355839052968e+002;
intrisicMat.at<double>(2, 2) = 1;

// 2., 3. R and T
// left
double leftRMatArray[] =
{
1, 0, 0,
0, 1, 0,
0, 0, 1
};
Mat leftRMat = Mat(3, 3, CV_64FC1, leftRMatArray); //Rotation Matrix
Mat leftRVec(3, 1, DataType<double>::type); // Rotation vector
Rodrigues(leftRMat, leftRVec);
Mat leftTVec(3, 1, DataType<double>::type); // Translation vector
leftTVec.at<double>(0) = 4.1158489381208221e+000;
leftTVec.at<double>(1) = 4.6847683212704716e+000;
leftTVec.at<double>(2) = 3.6169795190294256e+001;
// right
Mat rightRVec(3, 1, DataType<double>::type); // Rotation vector
rightRVec.at<double>(0) = -3.9277902400761393e-002;
rightRVec.at<double>(1) = 3.7803824407602084e-002;
rightRVec.at<double>(2) = 2.6445674487856268e-002;

Mat rightRMat; // Rotation matrix
Rodrigues(rightRVec, rightRMat);
Mat rightTVec(3, 1, DataType<double>::type); // Translation vector
rightTVec.at<double>(0) = 2.1158489381208221e+000;
rightTVec.at<double>(1) = -7.6847683212704716e+000;
rightTVec.at<double>(2) = 2.6169795190294256e+001;

// 4. distortion
Mat distCoeffs(5, 1, DataType<double>::type); // Distortion vector
distCoeffs.at<double>(0) = 0;// -7.9134632415085826e-001;
distCoeffs.at<double>(1) = 0;//1.5623584435644169e+000;
distCoeffs.at<double>(2) = 0;//-3.3916502741726508e-002;
distCoeffs.at<double>(3) = 0;//-1.3921577146136694e-002;
distCoeffs.at<double>(4) = 0;//1.1430734623697941e+002;
cout << "Intrisic matrix: " << intrisicMat << endl << endl;
cout << "Distortion coef: " << distCoeffs << endl << endl;
cout << "left Rotation vector: " << leftRVec << endl << endl;
cout << "left Translation vector: " << leftTVec << endl << endl;
cout << "right Rotation vector: " << rightRVec << endl << endl;
cout << "right Translation vector: " << rightTVec << endl << endl;
// project
// left
projectPoints(objectPoints, leftRVec, leftTVec, intrisicMat, distCoeffs, leftImagePoints);
// right
projectPoints(objectPoints, rightRVec, rightTVec, intrisicMat, distCoeffs, rightImagePoints);
for (int i = 0; i < leftImagePoints.size(); ++i)
{
cout << "Image point: " << objectPoints[i] << " Projected to " << leftImagePoints[i] << endl;
}
cout << "------------------" << endl;
for (int i = 0; i < rightImagePoints.size(); ++i)
{
cout << "Image point: " << objectPoints[i] << " Projected to " << rightImagePoints[i] << endl;
}
Mat m1 = intrisicMat * leftRMat;
Mat t1 = intrisicMat * leftTVec;
//triangulate
double leftPArray[] =
{
m1.at<double>(0, 0),m1.at<double>(0, 1),m1.at<double>(0, 2), t1.at<double>(0,0),
m1.at<double>(1, 0),m1.at<double>(1, 1),m1.at<double>(1, 2), t1.at<double>(1,0),
m1.at<double>(2, 0),m1.at<double>(2, 1),m1.at<double>(2, 2), t1.at<double>(2,0)
};
Mat leftPMat = Mat(3, 4, CV_64FC1, leftPArray); // left P Matrix
Mat m2 = intrisicMat * rightRMat;
Mat t2 = intrisicMat * rightTVec;
double rightPArray[] =
{
m2.at<double>(0, 0), m2.at<double>(0, 1), m2.at<double>(0, 2), t2.at<double>(0,0),
m2.at<double>(1, 0), m2.at<double>(1, 1), m2.at<double>(1, 2), t2.at<double>(1,0),
m2.at<double>(2, 0), m2.at<double>(2, 1), m2.at<double>(2, 2), t2.at<double>(2,0)
};
Mat rightPMat = Mat(3, 4, CV_64FC1, rightPArray); // right P Matrix
vector<Point3d> triangulationResult = triangulateInOpenCV(leftPMat, rightPMat, leftImagePoints, rightImagePoints);
cout << "reconstruction result" << endl;
cout << triangulationResult << endl;
cin.get();
return 0;
}
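As promised above, a more compact way to build the same projection matrices, assuming cv::hconcat; it is equivalent to the element-by-element copies in main():

Mat leftRt, rightRt;
hconcat(leftRMat, leftTVec, leftRt);    // [R | t], 3x4
hconcat(rightRMat, rightTVec, rightRt);
Mat leftP  = intrisicMat * leftRt;      // P = A * [R | t]
Mat rightP = intrisicMat * rightRt;

And if the distortion coefficients were non-zero, one option (a sketch, not a tested drop-in for this data) would be to map the image points to normalized coordinates with cv::undistortPoints first, and then triangulate using the plain [R | t] matrices as the projection matrices:

vector<Point2d> leftNorm, rightNorm;
undistortPoints(leftImagePoints, leftNorm, intrisicMat, distCoeffs);
undistortPoints(rightImagePoints, rightNorm, intrisicMat, distCoeffs);
Mat pts4D;
triangulatePoints(leftRt, rightRt, leftNorm, rightNorm, pts4D);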