OpenCV实现摄像机标定和像素转换,surf寻找特征点,FLANN匹配算子进行匹配
opencv,摄像机标定,特征点检测,特征点匹配,转换矩阵2016-11-09
最近做项目需要摄像机标定和图像转换,OpenCV可以较好的实现这个功能。我的这个例子可以生成两个摄像头的3x3转换矩阵。
但是因为摄像头本身存在成像畸变,尤其是全景摄像机,可能会有更加严重的成像畸变,所以如果试图通过计算两幅完整图像而得到单一转换矩阵,
这个矩阵并不能准确地反映出两幅图像像素之间的对应关系,靠近边缘区域的像素尤为明显。一个好的建议是将两幅图像分为若干个大小相等
也可以不等的块,分别计算每个块的转换矩阵,这样可以最大程度的降低摄像机成像畸变带来的转换误差。下面是源代码,但是这个代码没有实现分块
<span style="white-space:pre"> </span>//【1】载入原始图片 Mat srcImage1 = imread("tt1.jpg", 1); Mat srcImage2 = imread("tt2.jpg", 1); if (!srcImage1.data || !srcImage2.data) { printf("读取图片错误,请确定目录下是否有imread函数指定的图片存在~! \n"); return false; } //【2】使用SURF算子检测关键点 int minHessian = 600;//SURF算法中的hessian阈值 SurfFeatureDetector detector(minHessian);//定义一个SurfFeatureDetector(SURF) 特征检测类对象 vector<KeyPoint> keypoints_object, keypoints_scene;//vector模板类,存放任意类型的动态数组 //【3】调用detect函数检测出SURF特征关键点,保存在vector容器中 detector.detect(srcImage1, keypoints_object); detector.detect(srcImage2, keypoints_scene); //【4】计算描述符(特征向量) SurfDescriptorExtractor extractor; Mat descriptors_object, descriptors_scene; extractor.compute(srcImage1, keypoints_object, descriptors_object); extractor.compute(srcImage2, keypoints_scene, descriptors_scene); //【5】使用FLANN匹配算子进行匹配 FlannBasedMatcher matcher; vector< vector< DMatch > > matches; //matcher.match(descriptors_object, descriptors_scene, matches); matcher.knnMatch(descriptors_object, descriptors_scene, matches,2); double max_dist = 0; double min_dist = 100;//最小距离和最大距离 vector<DMatch> goodMatches; for (unsigned int i = 0; i < matches.size(); i++) { if (matches[i][0].distance < 0.6*matches[i][1].distance) { goodMatches.push_back(matches[i][0]); } } //【6】计算出关键点之间距离的最大值和最小值 for (unsigned j = 0; j < goodMatches.size(); j++) { double dist = goodMatches[j].distance; if (dist < min_dist) min_dist = dist; if (dist > max_dist) max_dist = dist; } printf(">Max dist 最大距离 : %f \n", max_dist); printf(">Min dist 最小距离 : %f \n", min_dist); //【7】存下匹配距离小于3*min_dist的点对 vector<DMatch>::iterator it; for (it = goodMatches.begin(); it != goodMatches.end();) { if ((*it).distance > 3 * min_dist) { it=goodMatches.erase(it); } else { it++; } } //绘制出匹配到的关键点 Mat img_matches; drawMatches(srcImage1, keypoints_object, srcImage2, keypoints_scene, goodMatches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); //定义两个局部变量 vector<Point2f> obj; 
vector<Point2f> scene; //从匹配成功的匹配对中获取关键点 for (unsigned int i = 0; i < goodMatches.size(); i++) { obj.push_back(keypoints_object[goodMatches[i].queryIdx].pt); scene.push_back(keypoints_scene[goodMatches[i].trainIdx].pt); } Mat H = findHomography(obj, scene, CV_RANSAC);//计算透视变换 //从待测图片中获取四边角点 vector<Point2f> obj_corners(4); obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(srcImage1.cols, 0); obj_corners[2] = cvPoint(srcImage1.cols, srcImage1.rows); obj_corners[3] = cvPoint(0, srcImage1.rows); vector<Point2f> scene_corners(4); //进行透视变换 perspectiveTransform(obj_corners, scene_corners, H); //绘制出角点之间的直线 line(img_matches, scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 1); line(img_matches, scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[2] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 1); line(img_matches, scene_corners[2] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[3] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 1); line(img_matches, scene_corners[3] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 1); //显示最终结果 imshow("效果图", img_matches);
可以看出无论是特征点的选取还是匹配都是相当准确的,紫色的矩形框就是左边图像的四个边角点经转换映射到右边图像的效果,同样相当准确。整个过程所需时间在1秒之内。