OpenCV calibrateCamera - Assertion failed (nimages > 0 && nimages == (int)imagePoints1.total())

Asked: 2015-07-10 12:14:06

Tags: c++ qt opencv camera-calibration

Full error: Assertion failed (nimages > 0 && nimages == (int)imagePoints1.total()), raised from the cv::calibrateCamera call in the code below.

The code:

cv::VideoCapture kalibrowanyPlik;   //the video

cv::Mat frame;
cv::Mat testTwo; //undistorted
cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 2673.579, 0, 1310.689, 0, 2673.579, 914.941, 0, 0, 1);
cv::Mat distortMat = (cv::Mat_<double>(1, 4) << -0.208143,  0.235290,  0.001005,  0.001339);
cv::Mat intrinsicMatrix = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
cv::Mat distortCoeffs = cv::Mat::zeros(8, 1, CV_64F);
//there are two sets for testing purposes. Values for the first two came from GML camera calibration app. 

std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector<std::vector<cv::Point3f> > objectPoints;

kalibrowanyPlik.open("625.avi");
    //cv::namedWindow("Distorted", CV_WINDOW_AUTOSIZE); //gotta see things
    //cv::namedWindow("Undistorted", CV_WINDOW_AUTOSIZE);

int maxFrames = kalibrowanyPlik.get(CV_CAP_PROP_FRAME_COUNT);    
int success = 0;    //so we can do the calibration only after we've got a bunch

for(int i=0; i<maxFrames-1; i++) {    
    kalibrowanyPlik.read(frame);
    std::vector<cv::Point2f> corners; //creating these here so they're effectively reset each time
    std::vector<cv::Point3f> objectCorners;

    int sizeX = kalibrowanyPlik.get(CV_CAP_PROP_FRAME_WIDTH); //imageSize
    int sizeY = kalibrowanyPlik.get(CV_CAP_PROP_FRAME_HEIGHT);

    cv::cvtColor(frame, frame, CV_BGR2GRAY); //must be gray

    cv::Size patternsize(9,6); //interior number of corners

    bool patternfound = cv::findChessboardCorners(frame, patternsize, corners, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE + cv::CALIB_CB_FAST_CHECK); //finding them corners

    if(patternfound == false) { //gotta know 
        qDebug() << "failure";
    }
    if(patternfound) {
        qDebug() << "success!";
        std::vector<cv::Point3f> objectCorners; //low priority issue - if I don't do this here, it becomes empty. Not sure why.
        for(int y=0; y<6; ++y) {
            for(int x=0; x<9; ++x) {
                objectCorners.push_back(cv::Point3f(x*28, y*28, 0)); //filling the array
            }
        }

        cv::cornerSubPix(frame, corners, cv::Size(11, 11), cv::Size(-1, -1),
                         cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

        cv::cvtColor(frame, frame, CV_GRAY2BGR); //I don't want gray lines

        imagePoints.push_back(corners); //filling array of arrays with pixel coord array
        objectPoints.push_back(objectCorners); //filling array of arrays with real life coord array, or rather copies of the same thing over and over
        cout << corners << endl << objectCorners;
        cout << endl << objectCorners.size() << "___" << objectPoints.size() << "___" << corners.size() << "___" << imagePoints.size() << endl;
        cv::drawChessboardCorners(frame, patternsize, cv::Mat(corners), patternfound); //drawing.

        if(success > 5) {
            double rms = cv::calibrateCamera(objectPoints, corners, cv::Size(sizeX, sizeY), intrinsicMatrix, distortCoeffs, rvecs, tvecs, cv::CALIB_USE_INTRINSIC_GUESS);
            //error - caused by passing CORNERS instead of IMAGEPOINTS. Also, imageSize is 640x480, and I've set the central point to 1310... etc
            cout << endl << intrinsicMatrix << endl << distortCoeffs << endl;
            cout << "\nrms - " << rms << endl;
        }
        success = success + 1;

        //cv::imshow("Distorted", frame);
        //cv::imshow("Undistorted", testTwo);
    }
}

I've already done some reading on this (This was an especially informative read), including a dozen or so threads on StackOverflow, and I've found that this error usually springs from imagePoints and objectPoints having unequal sizes, or from one of them being partially null, empty, or full of zeroes (plus links to tutorials that don't really help). That is not the case here. The output of the .size() check (objectCorners, objectPoints, corners, imagePoints, in that order) is:

54___7___54___7

Sample values, first from corners (pixel coordinates) and then from objectCorners (real-world coordinates):

(...)
277.6792, 208.92903;
241.83429, 208.93048;
206.99866, 208.84637;
(...)
84, 56, 0;
112, 56, 0;
140, 56, 0;
168, 56, 0;
(...)

So the per-view sizes (54 corners each) and the number of views collected so far (7) match between imagePoints and objectPoints, and none of the containers are null or empty.
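For completeness, a sketch of checking those sizes explicitly, using the containers from the code above (CV_Assert is OpenCV's standard assertion macro from the core module):

// Consistency check for the data handed to calibrateCamera: one vector of 2D
// corners per vector of 3D points, and the same number of points in each pair.
CV_Assert(!objectPoints.empty());
CV_Assert(objectPoints.size() == imagePoints.size());
for (size_t v = 0; v < objectPoints.size(); ++v) {
    CV_Assert(objectPoints[v].size() == imagePoints[v].size());
}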


Sample frame: [image omitted]

I know it's a mess, but so far I've been trying to get the code to run at all rather than to get accurate readings.

Each of them has 54 rows. Does anyone have any idea what might be causing the error? I'm using OpenCV 2.4.8 with Qt Creator 5.4 on Windows 7.

1 Answer:

Answer 0 (score: 1):

First of all, corners has to be replaced with imagePoints in the calibrateCamera call, as you have already noticed.
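A minimal sketch of the corrected call, keeping the variable names from the question:

// Pass the per-view collections; calibrateCamera expects one entry in
// imagePoints for every entry in objectPoints, which is exactly what the
// failed assertion (nimages == (int)imagePoints1.total()) is checking.
double rms = cv::calibrateCamera(objectPoints, imagePoints,
                                 cv::Size(sizeX, sizeY),
                                 intrinsicMatrix, distortCoeffs,
                                 rvecs, tvecs,
                                 cv::CALIB_USE_INTRINSIC_GUESS);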

In most (if not all) cases a set of 25 images or fewer is enough to get good results. A focal length of around 633 is nothing strange: it means the physical focal length is 633 × the pixel size. The CCD or CMOS pixel size has to be listed somewhere in the sensor's specification; find it out, multiply it by 633, and the result is your focal length.
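As a worked example (the 3.75 µm pixel pitch below is an assumed value for illustration, not the camera from the question):

#include <iostream>

int main() {
    // Convert a focal length reported by calibrateCamera (in pixels) to
    // millimetres. The pixel pitch is assumed; take the real value from the
    // sensor's datasheet.
    const double focalLengthPx = 633.0;
    const double pixelPitchMm  = 3.75e-3;                  // assumed 3.75 um pitch
    std::cout << focalLengthPx * pixelPitchMm << " mm\n";  // prints ~2.37 mm
    return 0;
}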

One suggestion for reducing the number of images used: take images from different viewpoints. 10 images from 10 different viewpoints give better results than 100 images from the same (or nearly the same) viewpoint. That is one of the reasons a video is not a good input. I suspect that with your code all of the images passed to calibrateCamera come from the same (or nearly the same) viewpoint; if that is the case, the calibration accuracy will suffer.
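If a video has to be used, one option is to keep only every N-th frame instead of feeding all of them; a rough sketch built on the question's loop (the target of roughly 20 views is an arbitrary choice):

// Aim for roughly 20 candidate views spread across the whole video, so that
// consecutive calibration views are less likely to come from near-identical
// viewpoints. Requires <algorithm> for std::max.
int frameStep = std::max(1, maxFrames / 20);
for (int i = 0; i < maxFrames - 1; i++) {
    if (!kalibrowanyPlik.read(frame))
        break;
    if (i % frameStep != 0)
        continue; // skip in-between frames
    // ... chessboard detection and point collection as in the question ...
}

Skipping frames on its own does not guarantee different viewpoints, of course; what matters is that the board or the camera actually moves between the frames that end up in the calibration set.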