
Binocular Stereo 3D Reconstruction and Reprojection Residual Computation with OpenCV

2019-03-26



1. Reprojection Residual

1.1 Basic Concept

Taken literally, the reprojection residual is the residual (Euclidean distance) between the 2D pixel point produced by a second projection and the pixel point obtained from the first projection. Its magnitude reflects the precision of the measuring instrument and the interference of external factors with the measurement, i.e. the size of the measurement error.

As shown in the figure below:

Suppose P is a 3D point in the world coordinate system (the true object point). The first projection refers to the image points captured when the stereo cameras C1 and C2 photograph the scene: P projects onto the two 2D image planes as points P1 and P2 in the figure. Using the principles of stereo vision, a 3D point (the theoretical value) can be computed from the matched pair (P1, P2) together with the intrinsic, extrinsic, and distortion parameters obtained by calibration. This computed 3D point is then projected a second time using the camera pose, yielding the point P′2. The Euclidean distance e between the reprojected point P′2 and the captured image point P2 is commonly taken as the measure of the reprojection error.
[Figure: world point P projected to P1 and P2 on cameras C1 and C2; the reprojected point P′2 and the residual e]
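Written out explicitly (a standard formulation added here for clarity, not taken verbatim from the original; K2 denotes the right camera's intrinsic matrix, (R, t) its pose, and π the perspective division π([X, Y, Z]ᵀ) = (X/Z, Y/Z)):

P′2 = π( K2 · (R·P + t) )
e = ‖ P2 − P′2 ‖2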

2. Transformations Between Key Coordinate Systems

2.1 Pixel Coordinates and Image Coordinates

u = x/dx + u0
v = y/dy + v0
(u, v) is the pixel coordinate, in pixels;
(x, y) is the image coordinate, in mm;
(u0, v0) is the position of the principal point (optical center), sometimes written (Cx, Cy);
dx and dy are the physical size of one pixel along the horizontal and vertical axes, in mm.
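As a minimal illustration of this relationship (the function name and signature below are mine, not from the original):

#include <opencv2/core.hpp>

// Convert a pixel coordinate (u, v) to physical image coordinates (x, y) in mm,
// given the pixel pitch (dx, dy) in mm and the principal point (u0, v0) in pixels.
// This inverts u = x / dx + u0 and v = y / dy + v0.
cv::Point2d pixelToImage(const cv::Point2d& uv,
                         double dx, double dy, double u0, double v0)
{
    return cv::Point2d((uv.x - u0) * dx, (uv.y - v0) * dy);
}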

2.2 Image Coordinates and Camera Coordinates

x = f·Xc/Zc
y = f·Yc/Zc

Here (Xc, Yc, Zc) is the point expressed in camera coordinates and f is the focal length; dividing by Zc performs the perspective projection onto the image plane.
In the formulas above, f is replaced by fx and fy in the x and y directions according to the calibration data. Note that fx and fy here denote physical lengths, in mm. The full conversion chain (3D camera coordinates -> pixel coordinates) is:
u = (fx/dx)·Xc/Zc + u0
v = (fy/dy)·Yc/Zc + v0

(fx/dx and fy/dy are the focal lengths in pixel units, which is what the intrinsic matrix stores.)
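A small sketch of this projection (assuming an OpenCV-style intrinsic matrix whose fx, fy entries are already in pixels, i.e. the physical focal length divided by the pixel pitch; the names are illustrative):

#include <opencv2/core.hpp>

// Project a point (Xc, Yc, Zc) in camera coordinates to pixel coordinates
// using K = [fx 0 cx; 0 fy cy; 0 0 1]. Lens distortion is ignored here.
cv::Point2d projectToPixel(const cv::Point3d& Pc, const cv::Matx33d& K)
{
    double u = K(0, 0) * Pc.x / Pc.z + K(0, 2);
    double v = K(1, 1) * Pc.y / Pc.z + K(1, 2);
    return cv::Point2d(u, v);
}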

2.3 Residual Computation

Let (u1, v1) be a matched point taken from the captured 2D image. Denote by (x, y) the result after distortion correction with undistortPoints() (these are normalized image coordinates), and let (u, v) be the corrected pixel coordinates:
u = x * fx + Cx
v = y * fy + Cy
Finally, compute the Euclidean distance between the resulting (u, v) and the reprojected point (u′, v′); this distance is the residual.
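A hedged sketch of this residual computation (the helper below assumes a single measured point and a reprojected point (u′, v′) computed elsewhere, e.g. from the triangulated 3D point; the function name is mine):

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

// Undistort one measured pixel point with undistortPoints() (which returns
// normalized coordinates), map it back to pixel coordinates with
// u = x * fx + Cx, v = y * fy + Cy, and return the Euclidean distance
// to the reprojected point (u', v'). K is assumed to be CV_64F.
double reprojectionResidual(const cv::Point2f& measured,
                            const cv::Point2d& reprojected,
                            const cv::Mat& K, const cv::Mat& dist)
{
    std::vector<cv::Point2f> src{ measured }, normalized;
    cv::undistortPoints(src, normalized, K, dist);
    double fx = K.at<double>(0, 0), fy = K.at<double>(1, 1);
    double cx = K.at<double>(0, 2), cy = K.at<double>(1, 2);
    cv::Point2d corrected(normalized[0].x * fx + cx,
                          normalized[0].y * fy + cy);
    return cv::norm(corrected - reprojected);
}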

Code:

// NewPointClouds.cpp : entry point for the console application.
//

#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/xfeatures2d.hpp>

#define La1 -0.0765583
#define La2 1.12963
#define La3 -0.000478566
#define La4 -0.00934149   // four distortion coefficients of the left camera
#define Ra1 -0.0564752
#define Ra2 0.598481
#define Ra3 0.00219212
#define Ra4 -0.000801994  // four distortion coefficients of the right camera
#define Lcx 563.737
#define Lcy 449.899    // principal point of the left camera
#define Rcx 638.157
#define Rcy 478.782   // principal point of the right camera
#define fx1 3317.41
#define fy1 3318.89   // focal length of the left camera
#define fx2 3346.03
#define fy2 3347.61   // focal length of the right camera

using namespace cv;
using namespace std;

int main()
{
	Mat imageL0 = imread("Pattenimage_L.bmp");
	Mat imageR0 = imread("Pattenimage_R.bmp");
	//Mat imageL1, imageR1;
	//GaussianBlur(imageL0, imageL1, Size(3, 3), 0.5);
	//GaussianBlur(imageR0, imageR1, Size(3, 3), 0.5);
	ofstream file,debug;
	file.open("PointClouds.txt");
	debug.open("debug.txt");

	Mat dist1 = (Mat_<float>(5, 1) << La1, La2, La3, La4, 0); // left camera distortion coefficients (k1, k2, p1, p2, k3)
	Mat dist2 = (Mat_<float>(5, 1) << Ra1, Ra2, Ra3, Ra4, 0); // right camera distortion coefficients

	double m1[3][3] = { { fx1,0,Lcx },{ 0,fy1,Lcy },{ 0,0,1 } };
	Mat m1_matrix(Size(3, 3), CV_64F, m1);  // left camera intrinsic matrix

	double m2[3][3] = { { fx2,0,Rcx },{ 0,fy2,Rcy },{ 0,0,1 } };
	Mat m2_matrix(Size(3, 3), CV_64F, m2);  // right camera intrinsic matrix
	float r2[3][1] = { { 0.00778951 },{ -0.121633 },{ 0.0150494 } };
	Mat r2_src2(Size(1, 3), CV_32F, r2);
	Mat r2_matrix(Size(3, 3), CV_64F); // right camera rotation matrix
	Rodrigues(r2_src2, r2_matrix);   // convert the rotation vector to a rotation matrix
	Mat t2_matrix = (Mat_<double>(3, 1) << 105.017, 2.22392, 19.288);   // right camera translation vector

	float M1[3][3] = { { fx1,0,Lcx },{ 0,fy1,Lcy },{ 0,0,1 } };
	Mat M1_matrix(Size(3, 3), CV_32F, M1);  // left camera intrinsic matrix (float copy)

	float M2[3][3] = { { fx2,0,Rcx },{ 0,fy2,Rcy },{ 0,0,1 } };
	Mat M2_matrix(Size(3, 3), CV_32F, M2);  // right camera intrinsic matrix (float copy)

	Mat R1 = Mat::eye(3, 3, CV_32F);     // undistort the left camera image
	Mat map1x = Mat(imageL0.size(), CV_32FC1);
	Mat map1y = Mat(imageL0.size(), CV_32FC1);
	initUndistortRectifyMap(M1_matrix, dist1, R1, M1_matrix, imageL0.size(), CV_32FC1, map1x, map1y);
	Mat picture1 = imageL0.clone();
	remap(imageL0, picture1, map1x, map1y, INTER_LINEAR);

	Mat R2 = Mat::eye(3, 3, CV_32F);     // undistort the right camera image
	Mat map2x = Mat(imageR0.size(), CV_32FC1);
	Mat map2y = Mat(imageR0.size(), CV_32FC1);
	initUndistortRectifyMap(M2_matrix, dist2, R2, M2_matrix, imageR0.size(), CV_32FC1, map2x, map2y);
	Mat picture2 = imageR0.clone();
	remap(imageR0, picture2, map2x, map2y, INTER_LINEAR);

	//imshow("undistorted left image", picture1);
	//imshow("undistorted right image", picture2);

	// detect feature points with SIFT
	Ptr<Feature2D>f2d = xfeatures2d::SIFT::create(0, 3, 0.04, 10); 
	vector<KeyPoint>keyPoint1, keyPoint2;
	Mat descriptors1, descriptors2;

	f2d->detectAndCompute(picture1, noArray(), keyPoint1, descriptors1);
	f2d->detectAndCompute(picture2, noArray(), keyPoint2, descriptors2);

	drawKeypoints(picture1, keyPoint1, picture1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	drawKeypoints(picture2, keyPoint2, picture2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	vector<DMatch>matches1;
	BFMatcher matcher1(NORM_L2);
	matcher1.match(descriptors1, descriptors2, matches1);

	Mat img_before;
	drawMatches(picture1, keyPoint1, picture2, keyPoint2, matches1, img_before);
	imshow("误匹配消除前", img_before);


	// use kNN to obtain the two best matches for each feature point
	vector<DMatch>matches;

	vector<vector<DMatch>>knn_matches;
	BFMatcher matcher(NORM_L2);
	
	// set K = 2, i.e. return the two nearest-neighbor descriptors for each query; a match is accepted only if the distance to the first neighbor is sufficiently smaller than to the second
	matcher.knnMatch(descriptors1, descriptors2, knn_matches, 2);
	
	// find the minimum match distance among matches that pass the Ratio Test
	float min_dist = FLT_MAX;
	for (size_t r = 0; r < knn_matches.size(); ++r) {
		//Ratio Test
		if (knn_matches[r][0].distance > 0.6*knn_matches[r][1].distance)
			continue;

		float dist = knn_matches[r][0].distance;
		if (dist < min_dist)
			min_dist = dist;
	}

	for (size_t r = 0; r < knn_matches.size(); r++) {
		// discard matches that fail the Ratio Test or whose distance is too large
		if (
			knn_matches[r][0].distance > 0.6*knn_matches[r][1].distance ||
			knn_matches[r][0].distance > 5 * max(min_dist, 10.0f)
			)
			continue;
		matches.push_back(knn_matches[r][0]);
	}
	Mat img_mid;
	drawMatches(picture1, keyPoint1, picture2, keyPoint2, matches, img_mid);
	imshow("误匹配排除后", img_mid);

	vector<Point2f>points1, points2;
	for (vector<DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {     // each DMatch pairs two matched keypoints
		points1.push_back(keyPoint1[it->queryIdx].pt);  // position of the keypoint in the left image
		points2.push_back(keyPoint2[it->trainIdx].pt);  // position of the keypoint in the right image
	}

	// estimate the fundamental matrix with RANSAC to reject remaining mismatched pairs
	double distance = 1;   // maximum distance from a point to its epipolar line
	double confidence = 0.98;    // confidence that the estimated matrix is correct
	vector<uchar>inliers(points1.size(), 0);
	Mat fundamental = findFundamentalMat(points1, points2, inliers, CV_FM_RANSAC, distance, confidence);
	// RANSAC method; distance to the epipolar line; confidence; matched points; match status (inlier or outlier)

	// extract the surviving inliers
	vector<uchar>::const_iterator itIn = inliers.begin();
	vector<DMatch>::const_iterator itM = matches.begin();

	// iterate over all matches
	vector<DMatch>Matches;
	for (; itIn != inliers.end(); ++itIn, ++itM) {
		if (*itIn) {  // a valid (inlier) match
			Matches.push_back(*itM);
		}
	}

	Mat imageOutput;
	drawMatches(picture1, keyPoint1, picture2, keyPoint2, Matches, imageOutput, Scalar::all(-1));

	namedWindow("picture of matching");
	imshow("picture of matching", imageOutput);
	Mat R2_matrix(Size(3, 3), CV_64F); // right camera rotation matrix
	Mat R2_src = (Mat_<double>(3, 1) << 0.00778951, -0.121633, 0.0150494);
	Rodrigues(R2_src, R2_matrix);   // convert the rotation vector to a rotation matrix
	Mat T2_matrix = (Mat_<double>(3, 1) << 105.017, 2.22392, 19.288);   // right camera translation vector


	Mat _src1(points1.size(), 1, CV_32FC2);
	Mat _src2(points1.size(), 1, CV_32FC2);
	
	Vec2f dd;
	for (int i = 0; i < points1.size(); i++) {
		dd[0] = points1[i].x;
		dd[1] = points1[i].y;
		_src1.at<Vec2f>(i, 0) = dd;
	}
	for (int i = 0; i < points2.size(); i++) {
		dd[0] = points2[i].x;
		dd[1] = points2[i].y;
		_src2.at<Vec2f>(i, 0) = dd;
	}

	Mat _dst1;
	Mat _dst2;

	// undistort the matched points (converts pixel coordinates to normalized image coordinates)
	undistortPoints(_src1, _dst1, m1_matrix, dist1); // the corrected coordinates must be multiplied by the focal length and offset by the principal point to return to pixels
	undistortPoints(_src2, _dst2, m2_matrix, dist2);
	
	// save the undistorted (normalized) 2D point coordinates
	ofstream PointsL;
	PointsL.open("PointsL.txt");
	ofstream PointsR;
	PointsR.open("PointsR.txt");

	Vec2f ee,ef;
	for (size_t i = 0; i < points1.size(); i++) {
		ee = _dst1.at<Vec2f>(i, 0);
		PointsL << ee[0] << " " << ee[1] << endl;

		ef = _dst2.at<Vec2f>(i, 0);
		PointsR << ef[0] << " " << ef[1] << endl;

	}

	// stereo rectification: afterwards the two optical axes are parallel and image rows are aligned
	Mat Rl, Rr, Pl, Pr, Q;
	stereoRectify(m1_matrix, dist1, m2_matrix, dist2, picture1.size(), R2_matrix, T2_matrix, Rl, Rr, Pl, Pr, Q, 0, -1, picture1.size());

	debug << m1_matrix << endl << m2_matrix << endl << Pl << endl << Pr << endl;

	// map coordinates from the original image planes to the rectified image planes
	Mat iRl(3, 3, CV_64F);
	Mat iRr(3, 3, CV_64F);
	for (int i = 0; i<3; i++)
	{
		for (int j = 0; j<3; j++)
		{
			iRl.at<double>(i, j) = Pl.at<double>(i, j); // the 3x3 block formed by columns 0-2 of Pl, multiplied by Rl below, maps the original left image plane to the rectified left image plane
			iRr.at<double>(i, j) = Pr.at<double>(i, j);
		}
	}

	iRl = iRl * Rl;
	iRr = iRr * Rr;

	// using Pl, Pr and Q, compute the rectified feature coordinates (x, y) and the left-right disparity d
	Mat Prec3D(4, 1, CV_64F);
	Mat Pworld3D(4, 1, CV_64F);
	double fxl, fyl, cxl, cyl;
	double fxr, fyr, cxr, cyr;
	// read the camera intrinsics
	// left camera
	fxl = m1_matrix.at<double>(0, 0);
	fyl = m1_matrix.at<double>(1, 1);
	cxl = m1_matrix.at<double>(0, 2);
	cyl = m1_matrix.at<double>(1, 2);
	// right camera
	fxr = m2_matrix.at<double>(0, 0);
	fyr = m2_matrix.at<double>(1, 1);
	cxr = m2_matrix.at<double>(0, 2);
	cyr = m2_matrix.at<double>(1, 2);

	//file << "Num : "<<points1.size() << endl;

	double rr[3][3] = { { 1,0,0 },{ 0,1,0 },{ 0,0,1 } };
	Mat RR1(3, 3, CV_64F, rr); // identity rotation matrix
	Mat RR(3, 1, CV_64F); // rotation vector
	Rodrigues(RR1, RR);
	Mat TT = (Mat_<double>(3, 1) << 0, 0, 0);   // zero translation vector (for the commented-out projectPoints call below)

	// triangulated 3D point coordinates
	vector<Point3d>coord_points;

	// reprojected 2D coordinates
	vector<Point2d>res_2d;

	ofstream dev_txt;
	dev_txt.open("dev.txt");

	ofstream of;
	of.open("trianglePoints.txt");
	Mat s;
	triangulatePoints(Pl, Pr, _dst1, _dst2, s);  // homogeneous 4xN result; not used further below

	for (int i = 0; i < points1.size(); i++) {

		dd = _dst1.at<Vec2f>(i, 0);
		double x1_ = dd[0];
		double y1_ = dd[1];
		Mat xx = (Mat_<double>(3, 1) << x1_, y1_, 1);
		Mat xx1(3, 1, CV_64F);
		xx1 = iRl * xx;
		double x1 = xx1.at<double>(0, 0) / xx1.at<double>(2, 0);
		double y1 = xx1.at<double>(1, 0) / xx1.at<double>(2, 0);

		dd = _dst2.at<Vec2f>(i, 0);
		double x2_ = dd[0];
		double y2_ = dd[1];
		Mat yy = (Mat_<double>(3, 1) << x2_, y2_, 1);
		Mat yy1(3, 1, CV_64F);
		yy1 = iRr * yy;
		double x2 = yy1.at<double>(0, 0) / yy1.at<double>(2, 0);
		double y2 = yy1.at<double>(1, 0) / yy1.at<double>(2, 0);

		// (x1, y1, d, 1) forms the disparity vector
		Prec3D.at<double>(0, 0) = x1;
		Prec3D.at<double>(1, 0) = y1;
		Prec3D.at<double>(2, 0) = x1 - x2;
		Prec3D.at<double>(3, 0) = 1;
		Pworld3D = Q * Prec3D;
		x1 = Pworld3D.at<double>(0, 0);
		y1 = Pworld3D.at<double>(1, 0);
		double z1 = Pworld3D.at<double>(2, 0);
		double w1 = Pworld3D.at<double>(3, 0);
		double wx = x1 / w1;
		double wy = -y1 / w1;   // y and z are negated here (sign flip for the chosen axis orientation)
		double wz = -z1 / w1;

		// undistorted pixel coordinates of the matched left-image point (back from normalized coordinates)
		double x_a = x1_ * fx1 + Lcx;
		double y_a = y1_ * fy1 + Lcy;

		// 2D coordinates of the matched point
		vector<Point2d>match_2d{ Point2d(x_a, y_a) };

		coord_points = vector<Point3d>{ Point3d(wx, wy, wz) };
		
		// reprojected 2D coordinates (reuses the res_2d declared above)
		double u1 = (fx1*wx) / wz + Lcx;
		double v1 = (fy1*wy) / wz + Lcy;
		res_2d = vector<Point2d>{ Point2d(u1, v1) };

		//projectPoints(coord_points, RR, TT, m1_matrix, dist1, res_2d);

		//res_2d[0].x = (res_2d[0].x - Lcx) / fx1;
		//res_2d[0].y = (res_2d[0].y - Lcy) / fy1;

		double dev;
		dev = norm(res_2d, match_2d, CV_L2);  // Euclidean distance between the reprojected and the measured point

		dev_txt << "coord_3D=" << coord_points << " ; " << " res_2d=" << res_2d << " ; " << "matcher=" << match_2d << " ; " << "dev=" << dev << endl;  // log the 3D point, reprojection, measurement, and deviation

		file << wx << "," << wy << "," << wz << endl;
	}


	file.close();
	debug.close();
	PointsL.close();
	PointsR.close();
	dev_txt.close();
	of.close();
	waitKey(0);
	return 0;
}


Source: https://blog.csdn.net/qq_39707351/article/details/88831040
