拼接视频,快速播放帧

时间:2018-09-10 14:53:48

标签: c++ opencv opencv-stitching

我试图通过找到重叠视频之间的单应性来匹配两个关键点,从而将两个视频缝合在一起。我已经成功地将其用于两个不同的图像。

对于视频,我已经加载了两个单独的视频文件,并循环读取帧,将它们分别复制到每个视频对应的空白矩阵 cap1frame 和 cap2frame 中。

然后,我将每个视频的每一帧发送到拼接函数,该函数根据两帧之间的单应性对关键点进行匹配,将两帧拼接起来并显示结果图像。(匹配方法基于 OpenCV 官方示例。)

虽然拼接本身是成功的,但这会导致视频播放非常缓慢,并且画面边缘会出现某种图形异常,如下图所示。

我想知道如何通过快速的视频播放来提高效率。

int main(int argc, char** argv){
      // Create a VideoCapture object and open the input file
      VideoCapture cap1("left.mov");
      VideoCapture cap2("right.mov");
      // Check if camera opened successfully
      if(!cap1.isOpened() || !cap2.isOpened()){
        cout << "Error opening video stream or file" << endl;
        return -1;
      }
        //Trying to loop frames
        for (;;){
        Mat cap1frame;
        Mat cap2frame;

        cap1 >> cap1frame;
        cap2 >> cap2frame;

        // If the frame is empty, break immediately
        if (cap1frame.empty() || cap2frame.empty())
          break;

        //sending each frame from each video to the stitch function then displaying
        imshow( "Result", Stitching(cap1frame,cap2frame));

        if(waitKey(30) >= 0) break;
         //destroyWindow("Stitching");
        // waitKey(0);
      }
      return 0;
    }

(此处为结果截图)

1 个答案:

答案 0 :(得分:0)

仅通过视频的第一帧就可以对单应性进行预先计算,从而解决了我的问题。因此,该函数仅被调用一次。

然后,我遍历视频的其余部分以应用视频帧的扭曲,以便可以根据预先计算的单应性将它们缝合在一起。此位最初在我的拼接功能之内。

这时我仍然遇到问题,调用imshow时播放仍然非常缓慢。但是我决定导出结果视频,并且在VideoWriter对象中设置了正确的fps时,该视频可以正常工作。我想知道是否只需要调整imshow的fps播放速度,但是不确定那一点。

我在下面有完整的代码:

#include <stdio.h>

#include <algorithm>
#include <iostream>
#include <vector>

#include "opencv2/calib3d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d/cuda.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
//To get homography from images passed in. Matching points in the images.

// Estimates the homography that maps image2 onto image1 from SIFT keypoint
// matches.  NOTE: despite its name, this function returns the 3x3 homography
// matrix (CV_64F), not a stitched image.
// Based on https://docs.opencv.org/3.3.0/d7/dff/tutorial_feature_homography.html
//
// @param image1  destination (reference) image
// @param image2  source image to be warped onto image1
// @return        3x3 homography H_12 such that image1 ~ H_12 * image2
Mat Stitching(Mat image1,Mat image2){

    Mat I_1 = image1;
    Mat I_2 = image2;

    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
        // Step 1: Detect the keypoints in both images.
        std::vector<KeyPoint> keypoints_1, keypoints_2;
        f2d->detect( I_1, keypoints_1 );
        f2d->detect( I_2, keypoints_2 );
        // Step 2: Calculate descriptors (feature vectors).
        Mat descriptors_1, descriptors_2;
        f2d->compute( I_1, keypoints_1, descriptors_1 );
        f2d->compute( I_2, keypoints_2, descriptors_2 );
        // Step 3: Match descriptor vectors using brute-force matching.
        BFMatcher matcher;
        std::vector< DMatch > matches;
        matcher.match( descriptors_1, descriptors_2, matches );
        // Sort match indices by ascending descriptor distance so we can
        // keep only the strongest correspondences.
        Mat index;
        int nbMatch = int(matches.size());
        Mat tab(nbMatch, 1, CV_32F);
        for (int i = 0; i < nbMatch; i++)
            tab.at<float>(i, 0) = matches[i].distance;
        sortIdx(tab, index, SORT_EVERY_COLUMN + SORT_ASCENDING);
        // BUG FIX: the original loop always read 200 entries, indexing out
        // of bounds whenever fewer than 200 matches were found.  Clamp to
        // the number of matches actually available.
        int nbBest = std::min(200, nbMatch);
        vector<DMatch> bestMatches;
        bestMatches.reserve(nbBest);
        for (int i = 0; i < nbBest; i++)
            bestMatches.push_back(matches[index.at < int > (i, 0)]);
        // 1st image is the destination image and the 2nd image is the src image
        std::vector<Point2f> dst_pts;                   //1st
        std::vector<Point2f> source_pts;                //2nd

        for (vector<DMatch>::iterator it = bestMatches.begin(); it != bestMatches.end(); ++it) {
            //-- Get the keypoints from the good matches
            dst_pts.push_back( keypoints_1[ it->queryIdx ].pt );
            source_pts.push_back( keypoints_2[ it->trainIdx ].pt );
        }
        // RANSAC rejects outlier correspondences while fitting H.
        Mat H_12 = findHomography( source_pts, dst_pts, CV_RANSAC );
      return H_12;
}
int main(int argc, char** argv){
  //Mats to get the first frame of video and pass to Stitching function.
  Mat I1, h_I1;
  Mat I2, h_I2;
  // Create a VideoCapture object and open the input file
  VideoCapture cap1("left.mov");
  VideoCapture cap2("right.mov");
  cap1.set(CV_CAP_PROP_BUFFERSIZE, 10);
  cap2.set(CV_CAP_PROP_BUFFERSIZE, 10);
  //Check if camera opened successfully
  if(!cap1.isOpened() || !cap2.isOpened()){
    cout << "Error opening video stream or file" << endl;
    return -1;
  }
//passing first frame to Stitching function
  if (cap1.read(I1)){
     h_I1 = I1;
   }

   if (cap2.read(I2)){
     h_I2 = I2;
   }
   Mat homography;
//passing here.
   homography = Stitching(h_I1,h_I2);
  std::cout << homography << '\n';

//creating VideoWriter object with defined values.
VideoWriter video("video/output.avi",CV_FOURCC('M','J','P','G'),30, Size(1280,720));

//Looping through frames of both videos.
    for (;;){
    Mat cap1frame;
    Mat cap2frame;

    cap1 >> cap1frame;
    cap2 >> cap2frame;

    // If the frame is empty, break immediately
    if (cap1frame.empty() || cap2frame.empty())
      break;
      Mat warpImage2;
      //warping the second video cap2frame so it matches with the first one.
      //size is defined as the final video size
      warpPerspective(cap2frame, warpImage2, homography, Size(1280,720), INTER_CUBIC);
      //final is the final canvas where both videos will be warped onto.
      Mat final (Size(1280,720), CV_8UC3);
      //Mat final(Size(cap1frame.cols*2 + cap1frame.cols, cap1frame.rows*2),CV_8UC3);
      //Using roi getting the relivent areas of each video.
      Mat roi1(final, Rect(0, 0,  cap1frame.cols, cap1frame.rows));
      Mat roi2(final, Rect(0, 0, warpImage2.cols, warpImage2.rows));
      //warping images on to the canvases which are linked with the final canvas.
      warpImage2.copyTo(roi2);
      cap1frame.copyTo(roi1);
      //writing to video.
      video.write(final);
      //imshow ("Result", final);
    if(waitKey(30) >= 0) break;
  }
  video.release();
  return 0;
}