使用相机内参和外参进行立体图像校正

时间:2017-02-26 06:10:14

标签: opencv

我想使用从 PhotoModeler 软件获得的相机内参和外参来校正(rectify)一对立体图像。我参考此链接的代码(修改自 https://gist.github.com/anonymous/6586653)编写了程序,并计算出了两相机之间的相对旋转和平移参数;但输入图像后得到的结果与预期不符,我尝试查找错误但没有成功。非常感谢您的帮助。由于无法在此上传所有图像,我将输入图像和结果放在了这个链接中: https://www.dropbox.com/s/5tmj9rk91tkrot4/RECTIFICATION_TEST_DATA.docx?dl=0

代码是

    #include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <iomanip>
#include<opencv2/opencv.hpp>
#include <iostream>
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <fstream>

    using namespace std;
    using namespace cv;

    int main(int argc, char** argv)
    {
    //  Mat img1 = imread("E:\\12_0628.tif", 1);
    //  Mat img2 = imread("E:\\12_0629.tif", 1);
        Mat img1 = imread("E:\\DSC_0483.JPG");
        Mat img2 = imread("E:\\DSC_0484.JPG");




//EXTERIOR OREINTATION FOR THE 1ST IMAGE

        double omega1 = -172.672440, phi1 = -80.168311, kappa1 = 163.005082, tx1 = -35.100000, ty1 = -56.700000, tz1 = -59.300000;
//EXTERIOR OREINTATION FOR THE 2ND IMAGE
        double omega2 = 27.576999, phi2 = -67.089920, kappa2 = 2.826051, tx2 = -37.600000, ty2 = -18.600000, tz2 = -41.700000;

        //Rotation matrices of the 1st image
omega1 = omega1  * CV_PI / 180.;
phi1 = phi1 * CV_PI / 180.;
kappa1 = kappa1 * CV_PI / 180.;
omega2 = omega2  * CV_PI / 180.;
phi2 = phi2 * CV_PI / 180.;
kappa2 = kappa2 * CV_PI / 180.;


        Mat RX1 = (Mat_<double>(3, 3) <<
                        1, 0, 0,
                        0, cos(omega1), sin(omega1),
                        0, -sin(omega1), cos(omega1));

        Mat RY1 = (Mat_<double>(3, 3) <<
                        cos(phi1), 0, -sin(phi1),
                        0, 1, 0,
                        sin(phi1), 0, cos(phi1));

        Mat RZ1 = (Mat_<double>(3, 3) <<
                        cos(kappa1), sin(kappa1), 0,
                        -sin(kappa1), cos(kappa1), 0,
                        0, 0, 1);

        // Composed rotation matrix with (RX, RY, RZ)
                Mat R1 = RX1 * RY1 * RZ1;
                Mat T1 = (Mat_<double>(3, 1) <<
                    tx1,
                    ty1,
                    tz1);
    /////////////////////Rotation matrices of the 2nd image//////////////////////////////////////////           


                //Rotation matrices of the 1st image

                Mat RX2 = (Mat_<double>(3, 3) <<
                    1, 0, 0,
                    0, cos(omega2), sin(omega2),
                    0, -sin(omega2), cos(omega2));

                Mat RY2 = (Mat_<double>(3, 3) <<
                    cos(phi2), 0, -sin(phi2),
                    0, 1, 0,
                    sin(phi2), 0, cos(phi2));

                Mat RZ2 = (Mat_<double>(3, 3) <<
                    cos(kappa2), sin(kappa2), 0,
                    -sin(kappa2), cos(kappa2), 0,
                    0, 0, 1);

                // Composed rotation matrix with (RX, RY, RZ)
                Mat R2 = RX2 * RY2 * RZ2;
                Mat T2 = (Mat_<double>(3, 1) <<
                    tx2,
                    ty2,
                    tz2);
                /////////////////////////////////////////////////////////////
        double f = 2284.;// the focal length of the camera nikon D40, this is equivalant to 18mm

        double w = (double)img1.cols;
        double h = (double)img1.rows;
        Mat M = (Mat_<double>(3, 3) <<//camera matrix

                        f       , 0.    , w/2,
                        0.      , f     , h/2,
                        0.      , 0.    , 1.);  



        Mat D = (Mat_<double>(5, 1) <<// distortion coefficicents
                        0,
                        0,
                        0,
                        0,
                        0.
                        );

            Mat R1inv = R1.inv();

            Mat Rrel = R2 * R1inv;
            Mat Trel = (-1 * Rrel) * T1+ T2;



            Mat T = (Mat_<double>(3, 1) <<//translation matrix
                    -2376.6,
                -740.0,


            229.0);


    cout << img1.size() << endl;
    cout << img2.size() << endl;
        //Mat R1, R2, 
    Mat P1, P2, Q;

    stereoRectify(M, D, M, D, img1.size(), Rrel, Trel, //the input data
            R1, R2, P1, P2, Q);//the output data



        Mat map1x, map1y, map2x, map2y;
        Mat imgdst1, imgdst2;
    //  Size (flaot)imageSize;
    //  imageSize = img1.size();
        initUndistortRectifyMap(M, D, R1, P1, img1.size(), CV_32FC1, map1x, map1y);
        initUndistortRectifyMap(M, D, R2, P2, img1.size(), CV_32FC1, map2x, map2y);

        remap(img1, imgdst1, map1x, map1y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
        remap(img2, imgdst2, map2x, map2y, INTER_LINEAR, BORDER_CONSTANT, Scalar());

        namedWindow("image1");
        namedWindow("image2");
        imshow("image1", imgdst1);
        imshow("image2", imgdst2);
    //  imwrite("DSC_0906_rect.jpg", imgdst1);
    //  imwrite("DSC_0913_rect.jpg", imgdst2);
        imwrite("E:\\Researches\\2016-2017_res\\2_8_epipolar_geometry\\temp_image\\output1.bmp", imgdst1);
        imwrite("E:\\Researches\\2016-2017_res\\2_8_epipolar_geometry\\temp_image\\output2.bmp", imgdst2);
        waitKey();
        return 0;
    }

0 个答案:

没有答案
相关问题