Eye Blink Detection

Time: 2014-04-06 18:49:38

Tags: c++ linux opencv

While the program is running, the terminal prints warnings such as:

  

OpenCV Error: Assertion failed (s >= 0) in setSize, file /home/me/opencv2.4/modules/core/src/matrix.cpp, line 116

The program compiles without errors and runs. The problem is that the eye ROI changes size whenever the user moves closer to or farther from the webcam, and that size change is what triggers the warnings. I managed to get rid of the warnings by forcing the eye ROI to the same size as my eye templates, but then the program can no longer classify the user's eyes as open or closed, because the minVal it obtains is 0. The method used is OpenCV template matching. Alternatively, I can avoid the warnings by keeping my distance from the webcam fixed and fixing the eye template size. Every time the warnings appear, the program fails to classify open/closed eyes. Even then the program does not work reliably, because it sometimes classifies an open eye as closed and vice versa.
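
One way to avoid the assertion is to make sure the ROI handed to cv::matchTemplate is never smaller than the template, for example by shrinking the template to fit the current ROI (or simply skipping the frame). Below is a minimal sketch, not the original program's code: safe_match is a hypothetical helper, and the same 8-bit BGR ROI/template images as in the code further down are assumed.

     #include <opencv2/opencv.hpp>
     #include <algorithm>

     // Hypothetical helper: run matchTemplate only when the sizes are valid.
     // templ is taken by value, so the caller's template is never modified.
     static bool safe_match( const cv::Mat& roi, cv::Mat templ, cv::Mat& result, int method )
     {
         if( roi.empty() || templ.empty() )
             return false;
         // matchTemplate needs the search image to be at least as large as the
         // template; otherwise the result matrix would get a negative size and
         // Mat::create hits the "Assertion failed (s >= 0)" in setSize.
         if( roi.cols < templ.cols || roi.rows < templ.rows )
         {
             cv::Size fit( std::min(roi.cols, templ.cols), std::min(roi.rows, templ.rows) );
             cv::resize( templ, templ, fit );   // shrink the template so it fits inside the ROI
         }
         cv::matchTemplate( roi, templ, result, method );
         return true;
     }

With a guard like this, MatchingMethod in the code below could call safe_match(roiImg, templ, result, match_method) and simply return when it reports false, instead of letting the size mismatch reach the assertion.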

Questions:

  1. Apart from template matching, is there another way to recognize open and closed eyes?

  2. How can I improve the blink classification? (One possible adjustment is sketched after this list.)

  3. Do you know of any working example in the OpenCV C/C++ API that classifies open and closed eyes and counts blinks accurately? My current code:

     #include <opencv2/opencv.hpp>
     #include <iostream>
     #include <cstdio>
     #include <cstring>
     #include <cstdlib>

     // Memory storage for the Haar detection results
     static CvMemStorage* storage = 0;
     // Haar classifier cascade used for eye detection
     static CvHaarClassifierCascade* cascade = 0;
     // Function prototype for detecting and drawing an object from an image
     bool detect_and_draw( IplImage* image, CvHaarClassifierCascade* cascade );
    
     const char *cascade_name[1]={"eyes.xml"};  
     cv::Mat roiImg;  
     int threshold_value = 200;  
     int threshold_type = 3;
     int const max_value = 255;  
     int const max_type = 4;  
     int const max_BINARY_value = 255;  
     int hough_thr = 35;  
     cv::Mat src_gray, dst;  
     using namespace cv;  
     Mat img1; Mat img2; Mat templ; Mat result;  
     const char* image_window = "Source Image";  
     const char* result_window = "Result window";  
     int match_method=0;  
     int max_Trackbar = 5;  
     int eye_open=0;  
     int eye_close=0;  
    
     // Match the eye ROI against the two templates: eye open or eye closed
     void MatchingMethod(cv::Mat templ,int id )  
     {  
      /// Source image to display  
      cv::Mat img_display;  
      roiImg.copyTo( img_display );  
      /// Create the result matrix  
      int result_cols = roiImg.cols - templ.cols + 1;  
      int result_rows = roiImg.rows - templ.rows + 1;  
      result.create( result_rows, result_cols, CV_32FC1 );
      /// Do the Matching and Normalize  
      cv::matchTemplate( roiImg, templ, result, match_method );  
      cv::normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );  
      /// Localizing the best match with minMaxLoc  
      double minVal; double maxVal; Point minLoc; Point maxLoc;  
      cv::Point matchLoc;  
      cv::minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );  
      /// Check whether the match value crosses the threshold
      if(id == 0 && (minVal < 0))
          {
          eye_open=eye_open+1;
          if(eye_open == 1)
               {
               std::cout<<"Eye Open"<<std::endl;
               eye_open=0;
               eye_close=0;
               }
          }
       else if(id == 1 && (minVal < 0))
          {
          eye_close=eye_close+1;
          if(eye_close == 1)
               {
               std::cout<<"Eye Closed"<<std::endl;
               eye_close=0;
               system("python send_arduino.py");
               }
          }
      /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better  
      if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )  
       { matchLoc = minLoc; }  
      else  
       { matchLoc = maxLoc; }  
      /// Show me what you got  
      cv::rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );  
      cv::rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );  
      cv::imshow( image_window, img_display );  
      cv::imshow( result_window, result );  
      return;  
     }  
     void detect_blink(cv::Mat roi)  
     {  
          try  
          {        
          MatchingMethod(img1,0);  
           MatchingMethod(img2,1);  
          }  
          catch( cv::Exception& e )  
          {  
               std::cout<<"An exception occurred: "<<e.what()<<std::endl;
          }  
     }  
     // Main function, defines the entry point for the program.  
     int main( int argc, char** argv )  
     {  
       if(argc <= 1)  
          {  
          std::cout<<"\n Help "<<std::endl;  
          std::cout<<"\n ------------------------------------\n"<<std::endl;  
          std::cout<<"./blink_detect open_eye.jpg close_eye.jpg\n"<<std::endl;  
         std::cout<<"Eg :: ./blink_detect 2.jpg 3.jpg\n"<<std::endl;  
          std::cout<<"\n ------------------------------------\n"<<std::endl;  
          exit(0);  
          }  
       // Structure for getting video from camera or avi  
       CvCapture* capture = 0;  
       // Images to capture the frame from video or camera or from file  
       IplImage *frame, *frame_copy = 0;  
       // Used for calculations  
       int optlen = strlen("--cascade=");  
       // Input file name for avi or image file.  
       const char* input_name;  
        // Load the open-eye (argv[1]) and closed-eye (argv[2]) templates
        img1 = imread( argv[1], 1 );
        img2 = imread( argv[2], 1 );
       // Load the HaarClassifierCascade  
        /// Create windows  
        cv::namedWindow( image_window, CV_WINDOW_AUTOSIZE );  
        cv::namedWindow( result_window, CV_WINDOW_AUTOSIZE );  
       // Allocate the memory storage  
       storage = cvCreateMemStorage(0);  
       capture = cvCaptureFromCAM( 0);  
       // Create a named window for showing the original frame
       cvNamedWindow( "original_frame", 1 );
       // If the capture was opened successfully, then:
       if( capture )  
       {  
         // Capture from the camera.  
         for(;;)  
         {  
           // Capture the frame and load it in IplImage  
           if( !cvGrabFrame( capture ))  
             break;  
           frame = cvRetrieveFrame( capture );  
           // If the frame does not exist, quit the loop  
           if( !frame )  
             break;  
           // Allocate framecopy as the same size of the frame  
           if( !frame_copy )  
             frame_copy = cvCreateImage( cvSize(frame->width,frame->height),  
                           IPL_DEPTH_8U, frame->nChannels );  
           // Check the origin of the image. If top left, copy the image frame to frame_copy.
           if( frame->origin == IPL_ORIGIN_TL )
             cvCopy( frame, frame_copy, 0 );
           // Else flip and copy the image
           else
             cvFlip( frame, frame_copy, 0 );
               for(int i=0;i<1;i++)  
               {  
            cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name[i], 0, 0, 0 );  
            // Check whether the cascade has loaded successfully. Else report an error and quit
            if( !cascade )  
                 {  
                    fprintf( stderr, "ERROR: Could not load classifier cascade\n" );  
                    return -1;  
                 }  
           // Call the function to detect and draw the eye region
                if(detect_and_draw(frame_copy,cascade))  
               {  
                    std::cout<<"Detected"<<std::endl;  
               }                                
               }  
           // Wait for a while before proceeding to the next frame  
           if( cvWaitKey( 1 ) >= 0 )  
            break;  
        }  
         // Release the images, and capture memory  
         cvReleaseHaarClassifierCascade(&cascade);  
         cvReleaseImage( &frame_copy );  
         cvReleaseCapture( &capture );  
         cvReleaseMemStorage(&storage);  
       }  
      return 0;  
     }  
     // Function to detect and draw any eye regions present in an image
     bool detect_and_draw( IplImage* img,CvHaarClassifierCascade* cascade )  
     {  
       int scale = 1;  
       // Create a new image based on the input image  
       IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );  
       // Create two points to represent the face locations  
       CvPoint pt1, pt2;  
       int i = 0;
       // Clear the memory storage which was used before  
       cvClearMemStorage( storage );  
       // Find whether the cascade is loaded, to find the faces. If yes, then:  
       if( cascade )  
       {  
         // There can be more than one eye in an image, so create a growable sequence of detections.
         // Detect the objects and store them in the sequence  
         CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,  
                           1.1, 8, CV_HAAR_DO_CANNY_PRUNING,  
                           cvSize(40, 40) );  
         // Loop over the number of eye regions found.
         for( i = 0; i < (faces ? faces->total : 0); i++ )  
         {  
           // Get the bounding rectangle of the detected eye region
           CvRect* r = (CvRect*)cvGetSeqElem( faces, i );  
           // Find the dimensions of the eye region, and scale it if necessary
           pt1.x = r->x*scale;  
           pt2.x = (r->x+r->width)*scale;  
           pt1.y = r->y*scale;  
           pt2.y = (r->y+r->height)*scale;  
           // Draw the rectangle in the input image  
           cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );  
           cv::Mat image(img);  
           cv::Rect rect;  
           rect = cv::Rect(pt1.x,pt1.y,(pt2.x-pt1.x),(pt2.y-pt1.y));  
           roiImg = image(rect);  
             cv::imshow("roi",roiImg);
           /// Classify the eye state; a detected closed eye triggers send_arduino.py
           detect_blink(roiImg);
         }  
       }  
    
       cvShowImage( "original_frame", img );
       // Release the temporary image before returning
       cvReleaseImage( &temp );
       // Report whether at least one eye region was detected
       if( i > 0 )
            return true;
       else
            return false;
     }
    
  4. Reference: Website referred
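
Regarding questions 1 and 2: after cv::normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() ) the smallest value in result is always exactly 0, so a test such as minVal < 0 can never separate open from closed eyes. Below is a minimal sketch of one possible alternative, not the original program's logic (match_score and is_eye_open are hypothetical helpers; the open/closed templates correspond to img1/img2 above): skip the normalization and compare the raw TM_CCOEFF_NORMED scores against both templates, keeping whichever matches better.

     #include <opencv2/opencv.hpp>

     // Hypothetical helper: best TM_CCOEFF_NORMED score of templ inside roi,
     // or -1.0 when the template does not fit into the ROI.
     static double match_score( const cv::Mat& roi, const cv::Mat& templ )
     {
         if( roi.cols < templ.cols || roi.rows < templ.rows )
             return -1.0;
         cv::Mat result;
         cv::matchTemplate( roi, templ, result, cv::TM_CCOEFF_NORMED );
         double minVal, maxVal;
         cv::minMaxLoc( result, &minVal, &maxVal );
         return maxVal;   // higher is better for TM_CCOEFF_NORMED
     }

     // Classify the eye ROI by matching it against both templates and keeping
     // the stronger score; a blink can then be counted whenever the state flips
     // from open to closed and back within a few frames.
     static bool is_eye_open( const cv::Mat& roi, const cv::Mat& open_templ, const cv::Mat& closed_templ )
     {
         return match_score( roi, open_templ ) >= match_score( roi, closed_templ );
     }

As for question 1, common non-template approaches include thresholding the eye ROI and tracking how many dark (pupil/iris) pixels survive per frame, or running a cascade trained only on open eyes and treating frames where it finds nothing as closed.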

0 Answers:

No answers yet