SURF feature detection - OpenCV

Date: 2014-08-21 09:25:09

Tags: java android c++ opencv surf

I am developing an Android application whose main purpose is to detect a requested object in the scene. To do this I am using OpenCV's SURF algorithm. I am not having much "luck" with the detection, because I do not know when an object has actually been "found".

I capture a frame with the device camera and follow these steps to obtain the object's keypoints and descriptors:

Java code

public void onSnapClick(View v) {
    Imgproc.GaussianBlur(frameGray, frameGray, new Size(3, 3), 2);
    Imgproc.Canny(frameGray, frameGray, 40, 120);
    Imgproc.resize(frameGray, frameGray, new Size(320, 240));
    FindFeatures(frameGray.getNativeObjAddr()); //JNI call
    //Some code to store data in DB...
}

JNI call

double hessianThreshold=600;
int nOctaves=4;
int nOctaveLayers=2;
bool extended=true;
bool upright=false;

JNIEXPORT void JNICALL Java_es_ugr_reconocimiento_Juego_FindFeatures(JNIEnv* env, jobject, jlong addrGray) {
    Mat& frameGray= *(Mat*) addrGray;
    vector<KeyPoint> keyPoints;
    Mat descriptores;
    SurfFeatureDetector detector_Surf(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
    SurfDescriptorExtractor extractor_Surf;
    detector_Surf.detect(frameGray, keyPoints);
    if (keyPoints.size() > 0)
        extractor_Surf.compute(frameGray, keyPoints, descriptores);
}

Now I select the object I want to find and follow these steps:

Java code

public void onSearchClick(View v) {
    Imgproc.GaussianBlur(frameGray, frameGray, new Size(3, 3), 2);
    Imgproc.Canny(frameGray, frameGray, 40, 120);
    Imgproc.resize(frameGray, frameGray, new Size(320, 240));
    nObject = FindObjects(frameGray.getNativeObjAddr()); //JNI call
    if (nObject == searchObject) {
        // draw frame with a rectangle around the found object in the scene...
    }
}

JNI call

double hessianThreshold=600;
int nOctaves=4;
int nOctaveLayers=2;
bool extended=true;
bool upright=false;

JNIEXPORT jint JNICALL Java_es_ugr_reconocimiento_Juego_FindObjects(JNIEnv* env, jobject, jlong addrGray) {
    Mat& frameGray = *(Mat*) addrGray;
    vector<KeyPoint> keyPoints_esc;
    Mat descriptores_esc;
    SurfFeatureDetector detector_Surf(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
    SurfDescriptorExtractor extractor_Surf;
    detector_Surf.detect(frameGray , keyPoints_esc);
    if (keyPoints_esc.size() == 0) return -1;
    extractor_Surf.compute(frameGray , keyPoints_esc, descriptores_esc);
    if (descriptores_esc.rows == 0) return -1; // cv::Mat exposes rows as a member, not a method

    for(int i=0;i<lstObjects.size();i++){
        Mat descriptores_obj = lstDescriptors.at(i);
        vector<KeyPoint> keyPoints_obj = lstKeyPoints.at(i);

        FlannBasedMatcher matcher;
        vector<vector<DMatch> > matches;
        matcher.knnMatch(descriptores_obj, descriptores_esc, matches, 2);
        // ----------------------------------------------------------------------
        // Keep only "good" matches using the ratio test: the best match must be
        // clearly closer than the second-best match.
        // ----------------------------------------------------------------------
        vector<DMatch> good_matches;
        // Check the size of each match list BEFORE touching matches[j][1],
        // otherwise this loop can segfault when knnMatch returns fewer than
        // two neighbours for a descriptor.
        for (size_t j = 0; j < matches.size(); j++) {
            if (matches[j].size() >= 2 &&
                matches[j][0].distance < 0.6 * matches[j][1].distance) {
                good_matches.push_back(matches[j][0]);
            }
        }

        if (good_matches.size() >= nThreshold) {
            vector < Point2f > obj;
            vector < Point2f > scene;

            for (int i = 0; i < good_matches.size(); i++) {
                //-- Get the keypoints from the good matches
                obj.push_back(keyPoints_obj[good_matches[i].queryIdx].pt);
                scene.push_back(keyPoints_esc[good_matches[i].trainIdx].pt);
            }

            Mat H = findHomography(obj, scene, CV_RANSAC);

            vector<Point2f> obj_corners(4);
            obj_corners[0] = cvPoint(0, 0);
            obj_corners[1] = cvPoint(240, 0);
            obj_corners[2] = cvPoint(240, 320);
            obj_corners[3] = cvPoint(0, 320);
            vector<Point2f> scene_corners(4);

            perspectiveTransform(obj_corners, scene_corners, H);

            line(frameGray, scene_corners[0], scene_corners[1], Scalar(255, 0, 0), 4);
            line(frameGray, scene_corners[1], scene_corners[2], Scalar(255, 0, 0), 4);
            line(frameGray, scene_corners[2], scene_corners[3], Scalar(255, 0, 0), 4);
            line(frameGray, scene_corners[3], scene_corners[0], Scalar(255, 0, 0), 4);

            for (unsigned int i = 0; i < scene.size(); i++) {
                const Point2f& kp = scene[i];
                circle(frameGray, Point(kp.x, kp.y), 10, Scalar(255, 255, 255, 255));
            }

            return i; //position of the matched object

        }

    }
    return -1; // no stored object matched this frame
}

I do not know which threshold works best in this comparison:

if (good_matches.size() >= nThreshold) // do findHomography...

I have been searching, and almost every piece of code I found uses 4 as nThreshold, but that does not work properly for me: my code "finds" an object almost every time.
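
One option (only a sketch, not from the original post) is to derive nThreshold from the number of descriptors stored for the object rather than using a fixed constant such as 4; minMatchFraction below is a hypothetical tuning parameter:

C++ code (sketch)

// Sketch only: require that a minimum fraction of the object's descriptors
// survives the ratio test before attempting findHomography.
// minMatchFraction is a made-up tuning value, not from the question.
const double minMatchFraction = 0.15;
int nThreshold = (int) (minMatchFraction * descriptores_obj.rows);
if (nThreshold < 8)
    nThreshold = 8; // never accept fewer than 8 good matches

if ((int) good_matches.size() >= nThreshold) {
    // enough consistent matches: proceed with findHomography(...)
}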

Is there a better approach? For example using a different matcher, a different threshold, or checking whether the computed homography actually produces something like a rectangle (I mention this because sometimes it "finds" something but draws four lines that do not form a rectangle).
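
For the "four lines that do not form a rectangle" case, one option (again just a sketch; the area threshold is an assumption) is to validate the projected corners before accepting a detection, for example by requiring that they form a convex quadrilateral of reasonable size:

C++ code (sketch)

// Sketch: after perspectiveTransform(obj_corners, scene_corners, H), reject
// degenerate results whose corners do not form a plausible quadrilateral.
// minArea is a made-up threshold that would need tuning for a 320x240 frame.
const double minArea = 1000.0;
bool looksLikeRectangle = isContourConvex(scene_corners)
                          && contourArea(scene_corners) > minArea;

if (!looksLikeRectangle)
    continue; // treat it as "not found" and try the next stored object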

1 answer:

Answer 0 (score: 0)

Please make the following changes in your code:

int nThreshold = 100;
if (good_matches.size() >= nThreshold) {
    // Too many "good" matches usually means lots of ambiguous points,
    // which leads to a false match, so skip further matching for this object.
    continue;
}

vector<Point2f> obj;
vector<Point2f> scene;

for (int i = 0; i < good_matches.size(); i++) {
    //-- Get the keypoints from the good matches
    obj.push_back(keyPoints_obj[good_matches[i].queryIdx].pt);
    scene.push_back(keyPoints_esc[good_matches[i].trainIdx].pt);
}

// Skip the homography if object or scene contains fewer than four points
// (findHomography needs at least 4 correspondences, so the program would
// crash here if this case is not handled).
if (obj.size() < 4 || scene.size() < 4) {
    continue;
}

Mat H = findHomography(obj, scene, CV_RANSAC);
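
One extra guard that is not in the answer above but may be worth adding: findHomography can return an empty matrix when RANSAC fails to fit a model, so checking for that before calling perspectiveTransform avoids another crash path:

C++ code (sketch)

// Sketch: findHomography may return an empty Mat when no model could be
// estimated; skip this object instead of feeding an empty matrix into
// perspectiveTransform.
if (H.empty()) {
    continue;
}
perspectiveTransform(obj_corners, scene_corners, H);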