What is the best way to find the closest match to a complex shape using OpenCV and C++?

Asked: 2016-01-18 19:05:06

Tags: c++ opencv shapes object-comparison

OK, here is my source code. This code takes an image from one file and compares it against a list of images in another folder. The image folder must also contain a .txt file listing the names of all the images you want to compare against. The problem I'm running into is that the two images are very similar but not identical, so I need a way to refine these matches further, and possibly even an entirely new way of comparing the two shapes (larger blocks, blobs, etc.). One approach I'm considering is to build a full keypoint map and only compare keypoints if they lie at, or near, a point that corresponds between the two images; i.e., take the keypoint at (12, 200), look within ±10 pixels of that (x, y), and see whether a similar keypoint exists in the other image.
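
A rough sketch of what I mean (the helper function and the hard-coded 10-pixel radius are placeholders I made up; it is not wired into the program below):

#include <opencv2/core.hpp>
#include <vector>
using namespace cv;
using namespace std;

//Hypothetical helper: count how many xray keypoints have an implant keypoint
// within +/- 'radius' pixels of the same (x, y) position.
static int countSpatiallyCloseKeypoints(const vector<KeyPoint>& xrayKps,
    const vector<KeyPoint>& implantKps, float radius = 10.0f)
{
    int hits = 0;
    for (size_t i = 0; i < xrayKps.size(); i++)
    {
        for (size_t j = 0; j < implantKps.size(); j++)
        {
            float dx = xrayKps[i].pt.x - implantKps[j].pt.x;
            float dy = xrayKps[i].pt.y - implantKps[j].pt.y;
            if (dx * dx + dy * dy <= radius * radius)
            {
                hits++;
                break; //one nearby implant keypoint is enough for this xray keypoint
            }
        }
    }
    return hits; //the implant image with the highest count would be the closest shape
}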

What I need is a way to get the best match for ActualImplantXrayOfThatSameImplantButASlightlyDifferentSize. Please and thank you!

PS: You will see commented-out sections where I was experimenting with Sobel derivatives and similar things. In the end I just adjusted the contrast and brightness on the X-ray to get the best outline. The same operation has to be applied to the implant images before they are used for matching.

#include "opencv2\highgui\highgui.hpp"
#include "opencv2\features2d\features2d.hpp"
#include "opencv2\imgproc.hpp"


#include <iostream>
#include <fstream>
#include <ctime>
const string defaultDetector = "ORB";
const string defaultDescriptor = "ORB";
const string defaultMatcher = "BruteForce-Hamming";
const string defaultXrayImagePath = "../../xray.png";
const string defaultImplantImagesTextListPath = "../../implantImage.txt";
const string defaultPathToResultsFolder = "../../results";

static void printIntro(const string& appName)
{
    cout << "/*                                                                                                       *\n"
        << " * Created by: Alex Gatz. 1/11/12. Created for: Xray Implant Identification                               *\n"
        << " * This code was created to scan a file full of images of differnt implants, generate keypoint maps       *\n"
        << " * for each image, and identifywhich image most closely matches a chosen image in another folder          *\n"
        << " */                                                                                                       *\n"
        << endl;

    cout << endl << "Format:\n" << endl;
    cout << "./" << appName << " [detector] [descriptor] [matcher] [xrayImagePath] [implantImagesTextListPath] [pathToSaveResults]" << endl;
    cout << endl;

    cout << "\nExample:" << endl
        << "./" << appName << " " << defaultDetector << " " << defaultDescriptor << " " << defaultMatcher << " "
        << defaultXrayImagePath << " " << defaultImplantImagesTextListPath << " " << defaultPathToResultsFolder << endl;
}

static void maskMatchesByImplantImgIdx(const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask)
{
    mask.resize(matches.size());
    fill(mask.begin(), mask.end(), 0);
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].imgIdx == trainImgIdx)
            mask[i] = 1;
    }
}

static void readImplantFilenames(const string& filename, string& dirName, vector<string>& implantFilenames)
{
    implantFilenames.clear();

    ifstream file(filename.c_str());
    if (!file.is_open())
        return;

    size_t pos = filename.rfind('\\');
    char dlmtr = '\\';
    if (pos == string::npos)
    {
        pos = filename.rfind('/');
        dlmtr = '/';
    }
    dirName = pos == string::npos ? "" : filename.substr(0, pos) + dlmtr;

    while (!file.eof())
    {
        string str; getline(file, str);
        if (str.empty()) break;
        implantFilenames.push_back(str);
    }
    file.close();
}

static bool createDetectorDescriptorMatcher(const string& detectorType, const string& descriptorType, const string& matcherType,
    Ptr<FeatureDetector>& featureDetector,
    Ptr<DescriptorExtractor>& descriptorExtractor,
    Ptr<DescriptorMatcher>& descriptorMatcher)
{
    cout << "< Creating feature detector, descriptor extractor and descriptor matcher ..." << endl;
    featureDetector = ORB::create( //All of these parameters can be adjusted to affect match accuracy and processing time.
        10000, //int nfeatures = Maximum number of features to retain; a higher number takes longer to process.                  Default: 500
        1.4f, //float scaleFactor = Pyramid decimation ratio; between 1.00 and 2.00.                                              Default: 1.2f
        6,   //int nlevels = Number of pyramid levels used; more levels take longer to process but give more accurate results.   Default: 8
        40,   //int edgeThreshold = Size of the border where features are not detected. Should roughly match patchSize.           Default: 31
        0,    //int firstLevel = Should remain 0 for now.                                                                         Default: 0
        4,    //int WTA_K = Points per BRIEF comparison (2, 3, or 4); 3 or 4 requires the "BruteForce-Hamming(2)" matcher.        Default: 2
        ORB::HARRIS_SCORE,    //int scoreType = ORB::HARRIS_SCORE is the most accurate ranking available for ORB.                 Default: HARRIS_SCORE
        33    //int patchSize = Size of the patch used by the oriented BRIEF descriptor. Should match edgeThreshold.              Default: 31
        );
    //featureDetector = ORB::create(); // <-- Uncomment this and comment the featureDetector above for default detector-
    //OpenCV 3.1 got rid of the dynamic naming of detectors and extractors. 

    //These two are one and the same when using ORB; some detectors and extractors are separate,
    // in which case you would set "descriptorExtractor = descriptorType::create();" or its equivalent.
    descriptorExtractor = featureDetector;

    descriptorMatcher = DescriptorMatcher::create(matcherType);

    cout << ">" << endl;

    bool isCreated = !(featureDetector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty());
    if (!isCreated)
        cout << "Can not create feature detector or descriptor extractor or descriptor matcher of given types." << endl << ">" << endl;

    return isCreated;
}

static void manipulateImage(Mat& image) //Manipulates images into only showing an outline!
{
    //Sobel derivative edge finder

    //int scale = 1;
    //int delta = 0;
    //int ddepth = CV_16S;
    ////equalizeHist(image, image); //This will equalize the lighting levels in each image.
    //GaussianBlur(image, image, Size(3, 3), 0, 0, BORDER_DEFAULT);

    //Mat grad_x, grad_y;
    //Mat abs_grad_x, abs_grad_y;
    ////For x
    //Sobel(image, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
    //convertScaleAbs(grad_x, abs_grad_x);
    ////For y
    //Sobel(image, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
    //convertScaleAbs(grad_y, abs_grad_y);

    //addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, image);

    //Specific Level adjustment (very clean)
    double alpha = 20; //Best Result: 20
    int beta = -300;   //Best Result: -300
    image.convertTo(image, -1, alpha, beta);
}

static bool readImages(const string& xrayImageName, const string& implantFilename,
    Mat& xrayImage, vector <Mat>& implantImages, vector<string>& implantImageNames)
{
    //TODO: Add a function call to automatically adjust all loaded images to the best settings for matching.
    cout << "< Reading the images..." << endl;
    xrayImage = imread(xrayImageName, IMREAD_GRAYSCALE); //Loads the image as grayscale (IMREAD_GRAYSCALE is the OpenCV 3.x name for CV_LOAD_IMAGE_GRAYSCALE).
    manipulateImage(xrayImage); //Runs image manipulations

    if (xrayImage.empty())
    {
        cout << "Xray image can not be read." << endl << ">" << endl;
        return false;
    }
    string trainDirName;
    readImplantFilenames(implantFilename, trainDirName, implantImageNames);
    if (implantImageNames.empty())
    {
        cout << "Implant image filenames can not be read." << endl << ">" << endl;
        return false;
    }
    int readImageCount = 0;
    for (size_t i = 0; i < implantImageNames.size(); i++)
    {
        string filename = trainDirName + implantImageNames[i];
        Mat img = imread(filename, IMREAD_GRAYSCALE); //Loads the images as grayscale.
        //manipulateImage(img); //Runs the Sobel derivative on the implant image.
        if (img.empty())
        {
            cout << "Implant image " << filename << " can not be read." << endl;
        }
        else
        {
            readImageCount++;
        }
        implantImages.push_back(img);
    }
    if (!readImageCount)
    {
        cout << "None of the implant images could be read." << endl << ">" << endl;
        return false;
    }
    else
        cout << readImageCount << " implant images were read." << endl;
    cout << ">" << endl;

    return true;
}

static void detectKeypoints(const Mat& xrayImage, vector<KeyPoint>& xrayKeypoints,
    const vector<Mat>& implantImages, vector<vector<KeyPoint> >& implantKeypoints,
    Ptr<FeatureDetector>& featureDetector)
{
    cout << endl << "< Extracting keypoints from images..." << endl;
    featureDetector->detect(xrayImage, xrayKeypoints);
    featureDetector->detect(implantImages, implantKeypoints);
    cout << ">" << endl;
}

static void computeDescriptors(const Mat& xrayImage, vector<KeyPoint>& xrayKeypoints, Mat& xrayDescriptors,
    const vector<Mat>& implantImages, vector<vector<KeyPoint> >& implantImageKeypoints, vector<Mat>& implantImageDescriptors,
    Ptr<DescriptorExtractor>& descriptorExtractor)
{
    cout << "< Computing descriptors for keypoints..." << endl;
    descriptorExtractor->compute(xrayImage, xrayKeypoints, xrayDescriptors);
    descriptorExtractor->compute(implantImages, implantImageKeypoints, implantImageDescriptors);

    int totalTrainDesc = 0;
    for (vector<Mat>::const_iterator tdIter = implantImageDescriptors.begin(); tdIter != implantImageDescriptors.end(); tdIter++)
        totalTrainDesc += tdIter->rows;

    cout << "Query descriptors count: " << implantDescriptors.rows << "; Total train descriptors count: " << totalTrainDesc << endl;
    cout << ">" << endl;
}

static void matchDescriptors(const Mat& xrayDescriptors, const vector<Mat>& implantDescriptors,
    vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher)
{
    cout << "< Set implant image descriptors collection in the matcher and match xray descriptors to them..." << endl;
    //time_t timerBegin, timerEnd;

    //time(&timerBegin);
    descriptorMatcher->add(implantDescriptors);
    descriptorMatcher->train();
    //time(&timerEnd);
    //double buildTime = difftime(timerEnd, timerBegin);

    //time(&timerBegin);
    descriptorMatcher->match(xrayDescriptors, matches);
    //time(&timerEnd);
    //double matchTime = difftime(timerEnd, timerBegin);

    CV_Assert(xrayDescriptors.rows == (int)matches.size() || matches.empty());

    cout << "Number of imageMatches: " << matches.size() << endl;
    //cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
    cout << ">" << endl;
}

static void saveResultImages(const Mat& xrayImage, const vector<KeyPoint>& xrayKeypoints,
    const vector<Mat>& implantImage, const vector<vector<KeyPoint> >& implantImageKeypoints,
    const vector<DMatch>& matches, const vector<string>& implantImagesName, const string& resultDir)
{
    cout << "< Save results..." << endl;
    Mat drawImg;
    vector<char> mask;
    for (size_t i = 0; i < implantImage.size(); i++)
    {
        if (!implantImage[i].empty())
        {
            maskMatchesByImplantImgIdx(matches, (int)i, mask);
            drawMatches(xrayImage, xrayKeypoints, implantImage[i], implantImageKeypoints[i],
                matches, drawImg, Scalar::all(-1), Scalar(0, 0, 255), mask, 4);
            string filename = resultDir + "/result_" + implantImagesName[i];
            if (!imwrite(filename, drawImg))
                cout << "Image " << filename << " can not be saved (may be because directory " << resultDir << " does not exist)." << endl;
        }
    }
    cout << ">" << endl;

    //After all results have been saved, another function will scan and place the final result in a separate folder. 
    //For now this save process is required to manually access each result and determine if the current settings are working well. 
}

int main(int argc, char** argv)
{
    //Initialize variables to global defaults.
    string detector = defaultDetector;
    string descriptor = defaultDescriptor;
    string matcher = defaultMatcher;
    string xrayImagePath = defaultXrayImagePath;
    string implantImagesTextListPath = defaultImplantImagesTextListPath;
    string pathToSaveResults = defaultPathToResultsFolder;

    //As long as you have 7 arguments (or none beyond the program name), you can proceed.
    if (argc != 7 && argc != 1)
    {
        //This will be called if an incorrect number of arguments is used to start the program.
        printIntro(argv[0]);
        system("PAUSE");
        return -1;
    }

    //As long as you still have 7 arguments, the variables below will be set
    // to the arguments you passed in.
    //If testing using XrayID --> Properties --> Debugging --> Command Arguments, remember to start with [detector] as the first argument;
    // C++ includes the [appName] argument as the first argument automatically.

    if (argc != 1) //I suggest placing a breakpoint here and stepping through to ensure the proper arguments were passed in. With a
                   // GUI this would not matter, because the GUI would structure the input and use a default if no input was given.
    {
        detector = argv[1]; 
        descriptor = argv[2]; 
        matcher = argv[3];
        xrayImagePath = argv[4]; 
        implantImagesTextListPath = argv[5];
        pathToSaveResults = argv[6];
    }

    //Set up cv::Ptr's for tools. 
    Ptr<FeatureDetector> featureDetector;
    Ptr<DescriptorExtractor> descriptorExtractor;
    Ptr<DescriptorMatcher> descriptorMatcher;

    //Check to see if tools are created, if not true print intro and close program.
    if (!createDetectorDescriptorMatcher(detector, descriptor, matcher, featureDetector, descriptorExtractor, descriptorMatcher))
    {
        printIntro(argv[0]);
        system("PAUSE");
        return -1;
    }

    Mat testImage;
    vector<Mat> implantImages;
    vector<string> implantImagesNames;

    //Check to see if readImages completes properly, if not true print intro and close program. 
    if (!readImages(xrayImagePath, implantImagesTextListPath, testImage, implantImages, implantImagesNames))
    {
        printIntro(argv[0]);
        system("PAUSE");
        return -1;
    }

    vector<KeyPoint> xrayKeypoints;
    vector<vector<KeyPoint> > implantKeypoints;
    detectKeypoints(testImage, xrayKeypoints, implantImages, implantKeypoints, featureDetector);

    Mat xrayDescriptors;
    vector<Mat> implantTestImageDescriptors;
    computeDescriptors(testImage, xrayKeypoints, xrayDescriptors, implantImages, implantKeypoints, implantTestImageDescriptors,
        descriptorExtractor);

    vector<DMatch> imageMatches;
    matchDescriptors(xrayDescriptors, implantTestImageDescriptors, imageMatches, descriptorMatcher);
    saveResultImages(testImage, xrayKeypoints, implantImages, implantKeypoints, imageMatches, implantImagesNames, pathToSaveResults);

    system("PAUSE");
    return 0;
}

2 Answers:

Answer 0 (score: 0)

That image looks a lot like an artificial hip. If you are working with medical images, you should definitely look at The Insight Toolkit (ITK), which has many features designed specifically for the needs of this field. You can do a straightforward model-to-image registration between the real-world image and your template data to find the best result. I think you would get much better results with this approach than with the point-based test described above.

This kind of registration performs an iterative optimization over a set of parameters (in this case, the parameters of an affine transform), seeking the best mapping of the model onto the image data.

The example above takes a fixed image and tries to find the transformation that maps a moving image onto it. The transform is a 2D affine transform (rotation and translation in this case) whose parameters are the result of running an optimizer that maximizes a matching metric. The metric measures how well the fixed image and the transformed moving image match. The interpolator takes the moving image and applies the transform to map it onto the fixed image.

For your example images, the fixed image could be the original X-ray and the moving image could be the actual implant. You will probably need to add scaling for a full affine transform, since the two are of different sizes.

The metric measures how well the transformed moving image matches the fixed image, so you need to decide on a tolerance or minimum metric value at which a match counts as valid. If the images are very different, the metric will be very low and the match can be rejected.

The output is the set of transform parameters, and the output image is the final, best transform applied to the moving image (not a combination of the two images). The result essentially tells you where the implant was found in the X-ray.
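
If you would rather stay inside OpenCV than pull in ITK, cv::findTransformECC (in the video module of OpenCV 3.x) does intensity-based registration in the same spirit: it iteratively estimates an affine warp and returns a correlation score you can threshold. A rough sketch under those assumptions (the file names, iteration count, and acceptance threshold are placeholders, and this is not the ITK pipeline described above):

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/video/tracking.hpp> //findTransformECC
    #include <iostream>
    using namespace cv;

    int main()
    {
        //Fixed image = the x-ray, moving image = the implant template.
        Mat fixedImg  = imread("xray.png", IMREAD_GRAYSCALE);
        Mat movingImg = imread("implant.png", IMREAD_GRAYSCALE);
        if (fixedImg.empty() || movingImg.empty()) return -1;

        //2x3 affine warp initialized to the identity; rotation, translation and scale are estimated.
        Mat warp = Mat::eye(2, 3, CV_32F);
        TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 200, 1e-6);

        //Returns the final enhanced correlation coefficient (a rough "how well did it match" score).
        double ecc = findTransformECC(fixedImg, movingImg, warp, MOTION_AFFINE, criteria);

        //Warp the implant template onto the x-ray for visual inspection.
        Mat aligned;
        warpAffine(movingImg, aligned, warp, fixedImg.size(), INTER_LINEAR + WARP_INVERSE_MAP);

        std::cout << "ECC score: " << ecc << std::endl; //threshold this to accept or reject a template
        return 0;
    }

Running this once per implant template and keeping the template with the highest ECC score would play the same role as the metric threshold described above.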

Answer 1 (score: 0)

  • Try the code below. Hope this helps.

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <iostream>
    #include <ctime>
    #include <stdio.h>
    using namespace cv;
    using namespace std;
    
    int main(int argc, const char *argv[])
    {
       double ratio = 0.9;
       clock_t begin = clock(); //start timing; the elapsed-time calculation at the end uses this
    
       Mat image1 = imread("Image1_path);
       Mat image2 = cv::imread("Image2_path");
    
       Ptr<FeatureDetector> detector;
       Ptr<DescriptorExtractor> extractor;
    
      // TODO default is 500 keypoints..but we can change
      detector = FeatureDetector::create("ORB");
      extractor = DescriptorExtractor::create("ORB");
    
      vector<KeyPoint> keypoints1, keypoints2;
      detector->detect(image1, keypoints1);
      detector->detect(image2, keypoints2);
    
      cout << "# keypoints of image1 :" << keypoints1.size() << endl;
      cout << "# keypoints of image2 :" << keypoints2.size() << endl;
    
      Mat descriptors1,descriptors2;
      extractor->compute(image1,keypoints1,descriptors1);
      extractor->compute(image2,keypoints2,descriptors2);
    
      cout << "Descriptors size :" << descriptors1.cols << ":"<< descriptors1.rows << endl;
    
      vector< vector<DMatch> > matches12, matches21;
      Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
      matcher->knnMatch( descriptors1, descriptors2, matches12, 2);
      matcher->knnMatch( descriptors2, descriptors1, matches21, 2);
    
      //BFMatcher bfmatcher(NORM_L2, true);
      //vector<DMatch> matches;
      //bfmatcher.match(descriptors1, descriptors2, matches);
      double max_dist = 0; double min_dist = 100;
      for( int i = 0; i < descriptors1.rows; i++)
      {
          double dist = matches12[i][0].distance;
          if(dist < min_dist)
             min_dist = dist;
          if(dist > max_dist)
             max_dist = dist;
      }
      printf("-- Max dist : %f \n", max_dist);
      printf("-- Min dist : %f \n", min_dist);
      cout << "Matches1-2:" << matches12.size() << endl;
      cout << "Matches2-1:" << matches21.size() << endl;
    
      std::vector<DMatch> good_matches1, good_matches2;
      for(int i=0; i < matches12.size(); i++)
      {
          if(matches12[i][0].distance < ratio * matches12[i][1].distance)
             good_matches1.push_back(matches12[i][0]);
      }
    
      for(int i=0; i < matches21.size(); i++)
      {
          if(matches21[i][0].distance < ratio * matches21[i][1].distance)
             good_matches2.push_back(matches21[i][0]);
      }
    
      cout << "Good matches1:" << good_matches1.size() << endl;
      cout << "Good matches2:" << good_matches2.size() << endl;
    
     // Symmetric Test
     std::vector<DMatch> better_matches;
     for(int i=0; i<good_matches1.size(); i++)
     {
         for(int j=0; j<good_matches2.size(); j++)
         {
             if(good_matches1[i].queryIdx == good_matches2[j].trainIdx && good_matches2[j].queryIdx == good_matches1[i].trainIdx)
             {
                 better_matches.push_back(DMatch(good_matches1[i].queryIdx, good_matches1[i].trainIdx, good_matches1[i].distance));
                 break;
             }
         }
     }
    
     cout << "Better matches:" << better_matches.size() << endl;
     clock_t end = clock();
     double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
     cout << "Elapsed time: " << elapsed_secs << " s" << endl;
    
     // show it on an image
     Mat output;
     drawMatches(image1, keypoints1, image2, keypoints2, better_matches, output);
     imshow("Matches result",output);
     waitKey(0);
    
     return 0;
    }
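
Note: the FeatureDetector::create("ORB") / DescriptorExtractor::create("ORB") calls above use the OpenCV 2.4-style factory API. If you build against OpenCV 3.x as in your own code, the setup would look roughly like this (a sketch, untested against your build):

    #include <opencv2/features2d.hpp>
    using namespace cv;

    Ptr<ORB> orb = ORB::create(); //in 3.x the detector and extractor are one object
    //orb->detect(image1, keypoints1);
    //orb->compute(image1, keypoints1, descriptors1);
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");

The rest of the matching, ratio test, and symmetry test should work unchanged.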