不准确的相机姿态(camera pose)结果

时间:2015-03-16 03:11:33

标签: c++ opencv camera-calibration homography

我正在尝试测量相对于基准标记的相机姿势。我在这里测量一个与此类似的标记:http://tinypic.com/view.php?pic=4r6k3q&s=8#.VNgsgzVVK1F

我的代码如下所示。寻找特征匹配、甚至围绕匹配结果画出方框,对我来说都没有太大困难。

我已经将计算出的相机姿态与实际的物理测量结果进行了比较,相机姿态始终偏差约半米。如果只是轻微的误差,我不会感到惊讶,但半米的偏差实在太大了。

起初,我认为这是我的相机校准,因为鱼眼镜头会造成明显的失真。我重新校准了,但这显然不是问题。

我一直在反复检查代码,想看看有没有什么明显的问题,但至今还没有发现。可能是我盯着这段代码太久了,问题对旁人来说也许一眼就能看出来。

// Load the marker (template) and scene images from disk, keeping
// whatever channel layout the files were saved with.
Mat marker = imread(FILE1, CV_LOAD_IMAGE_UNCHANGED);
Mat scene  = imread(FILE2, CV_LOAD_IMAGE_UNCHANGED);

// Bail out early if either image failed to load.
if( marker.empty() || scene.empty() ) {
    cout << "Error: image cannot be loaded..!!" << endl;
    system("pause");
    return -1;
}

//Convert marker & scene images to grayscale for feature detection.
//BUG FIX: imread loads color images in BGR channel order, so the
//correct conversion code is CV_BGR2GRAY; the original CV_RGB2GRAY
//swaps the red/blue channel weights and yields a slightly wrong
//grayscale image.
//NOTE(review): this assumes both files are 3-channel color images —
//CV_LOAD_IMAGE_UNCHANGED can also return 1- or 4-channel data, in
//which case cvtColor would need a different code; confirm the inputs.
cvtColor(marker, marker, CV_BGR2GRAY);
cvtColor(scene, scene, CV_BGR2GRAY);

// Detect keypoints in both images with SIFT.
// (The original comment said "ORB Detector", but the code uses SIFT.)
SiftFeatureDetector detector;
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
detector.detect(marker, keypoints1);
detector.detect(scene, keypoints2);

// Compute SIFT descriptors for the detected keypoints.
SiftDescriptorExtractor extractor;
Mat descriptors1;
Mat descriptors2;
extractor.compute(marker, keypoints1, descriptors1);
extractor.compute(scene, keypoints2, descriptors2);

//Brute-force match descriptors from marker to scene.
//crossCheck=true keeps only mutually-best matches, so matches.size()
//can be SMALLER than descriptors1.rows.
BFMatcher matcher(NORM_L2, true);
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);

//Track the min and max descriptor distance over the returned matches.
double min_dist = 10000;
double max_dist = 0;

//BUG FIX: the original loop ran descriptors1.rows times while
//indexing matches[i]; with cross-checking enabled that reads past the
//end of `matches` whenever any match was discarded. Iterate over the
//matches that actually exist.
for( size_t i = 0; i < matches.size(); i++ ) {
    double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
}

//Keep only matches whose distance is small relative to the best match.
std::vector< DMatch > good_matches;
for( size_t i = 0; i < matches.size(); i++ ) {
    if( matches[i].distance <= max(2*min_dist, 90.0) ) { //90 was found through quick experimenting, this may or may not be optimal.
        good_matches.push_back( matches[i]);
    }
}

//A homography needs at least 4 point correspondences; exit otherwise.
if ( good_matches.size() < 4) {
    //BUG FIX: the original printed the image-loading error message
    //here (copy-paste), which makes failures confusing to diagnose.
    cout << "Error: not enough good matches to estimate a homography..!!" << endl;
    system("pause");
    return -1;
}

//Visualize the retained matches side by side (marker left, scene right).
Mat matchesImage;
drawMatches( marker, keypoints1, scene, keypoints2,
    good_matches, matchesImage, Scalar::all(-1), Scalar::all(-1),
    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

//Collect the matched point coordinates for homography estimation.
//BUG FIX: the original loop bound was matches.size() while indexing
//good_matches[i]. Since good_matches is a filtered (shorter) subset,
//that reads out of bounds and feeds garbage correspondences into
//findHomography — a likely contributor to the inaccurate pose.
vector<Point2f> srcPoints, dstPoints;

for (size_t i = 0; i < good_matches.size(); i++) {
    srcPoints.push_back(keypoints1[good_matches[i].queryIdx].pt);
    dstPoints.push_back(keypoints2[good_matches[i].trainIdx].pt);
}

//Robustly estimate the marker->scene homography with RANSAC.
Mat H = findHomography(srcPoints, dstPoints, CV_RANSAC);

//Get corners of marker //pixels
vector<Point2f> marker_corners(4);
marker_corners[0] = cvPoint(0,0);
marker_corners[1] = cvPoint(marker.cols, 0);
marker_corners[2] = cvPoint(marker.cols, marker.rows);
marker_corners[3] = cvPoint(0, marker.rows);

vector<Point2f> scene_corners(4);

//Perform perspective transform
perspectiveTransform(marker_corners, scene_corners, H);

//Draw lines between corners
line(matchesImage, scene_corners[0] + Point2f(marker.cols, 0), scene_corners[1] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);
line(matchesImage, scene_corners[1] + Point2f(marker.cols, 0), scene_corners[2] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);
line(matchesImage, scene_corners[2] + Point2f(marker.cols, 0), scene_corners[3] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);
line(matchesImage, scene_corners[3] + Point2f(marker.cols, 0), scene_corners[0] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);

//Open calibration file
FileStorage fs2("calibration.xml", FileStorage::READ);

//Read in camera matrix and distortion coefficients
Mat cameraMatrix, distCoeffs;
fs2["Camera_Matrix"] >> cameraMatrix;
fs2["Distortion_Coefficients"] >> distCoeffs;
fs2.release();

//Define marker corners in 3d
vector<Point3f> objp1(4);
marker_corners_3d[0] = cvPoint3D32f(0,0,0);
marker_corners_3d[1] = cvPoint3D32f(marker.cols, 0, 0);
marker_corners_3d[2] = cvPoint3D32f(marker.cols, marker.rows, 0);
marker_corners_3d[3] = cvPoint3D32f(0, marker.rows, 0);

//Find Object Pose
solvePnPRansac( marker_corners_3d, scene_corners, cameraMatrix, distCoeffs, rvec, tvec );

// solvePnPRansac gave the pose of the marker in the camera frame.
// Invert it to get the camera pose in the marker frame:
//   R_cam = R^T,   t_cam = -R^T * t
Mat R;
Rodrigues(rvec, R);   // rotation vector -> 3x3 rotation matrix

R = R.t();            // transpose == inverse for a rotation matrix
Rodrigues(R, rvec);   // store the inverted rotation back as a vector
tvec = -R * tvec;     // translation of the camera relative to the marker

// Pack [R | t] into a 4x4 homogeneous camera-pose matrix.
Mat camPose = Mat::eye(4, 4, R.type());
R.copyTo(camPose(Rect(0, 0, 3, 3)));      // top-left 3x3 block
tvec.copyTo(camPose(Rect(3, 0, 1, 3)));   // last column, top 3 rows

}

0 个答案:

没有答案