Drawing rectangle around detected object using SURF

Problem description

I am trying to detect an object with the following code, which uses a SURF detector. I do not want to draw the matches; I want to draw a rectangle around the detected object, but somehow I am unable to get a correct homography. Can anyone point out where I am going wrong?

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;

int main()
{
    Mat object = imread( "sample.jpeg", CV_LOAD_IMAGE_GRAYSCALE );

    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;

    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;

    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    VideoCapture cap(0);

    namedWindow("Good Matches",0);
    cvResizeWindow("Good Matches",800,800);

    std::vector<Point2f> obj_corners(4);

    //Get the corners from the object
    obj_corners[0] = (cvPoint(0,0));
    obj_corners[1] = (cvPoint(object.cols,0));
    obj_corners[2] = (cvPoint(object.cols,object.rows));
    obj_corners[3] = (cvPoint(0, object.rows));

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;

        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_RGB2GRAY);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        //drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(),DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( image, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( image, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( image, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( image, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Good Matches", image );

        key = waitKey(1);
    }
    return 0;
}

Solution

If you want to see both images side by side, with a rectangle drawn around the detected object in the second one, you need to draw the lines into the img_matches image rather than into image.
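For that first option, a minimal sketch (assuming the commented-out drawMatches call is re-enabled so that img_matches holds the object image and the camera frame side by side; the Point2f( object.cols, 0) offset then shifts the projected corners into the scene half of the composite):

drawMatches( object, kp_object, image, kp_image, good_matches, img_matches,
             Scalar::all(-1), Scalar::all(-1), vector<char>(),
             DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

// draw the projected object corners in the right-hand (scene) half of img_matches
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );

imshow( "Good Matches", img_matches );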

If instead you only want to see the camera image with the object marked, not the side-by-side pair, drop the Point2f( object.cols, 0) offsets and change the drawing code to:

line( image, scene_corners[0], scene_corners[1], Scalar( 0, 255, 0), 4 );
line( image, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 4 );
line( image, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 4 );
line( image, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 4 );

and show the image in a new window:

imshow( "Result", image );
