OpenCV stereo image pair correction... displaying the results


Question

I am attempting to use OpenCV to take stereo image pairs, i.e. a left and a right image of the same subject, and then correct them for rotation and translation without knowing any of the properties of the camera. Once the images are corrected I should be able to display them to the user.
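For context, the uncalibrated route I am trying to follow boils down to roughly this (a minimal sketch, assuming matched point correspondences points1/points2 already exist; it applies the rectifying homographies directly with warpPerspective, whereas my full code below goes through initUndistortRectifyMap/remap):

// Estimate the fundamental matrix from the matched points, then derive
// rectifying homographies without knowing any camera intrinsics.
Mat F = findFundamentalMat(Mat(points1), Mat(points2), FM_RANSAC, 3., 0.99);
Mat H1, H2;
stereoRectifyUncalibrated(Mat(points1), Mat(points2), F, leftImg.size(), H1, H2);

// Warp both images into the common rectified frame for display.
Mat leftRect, rightRect;
warpPerspective(leftImg, leftRect, H1, leftImg.size());
warpPerspective(rightImg, rightRect, H2, rightImg.size());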

So far I have merged two demo programs from the OpenCV samples directory (badly, for the moment; I will clean the code up and arrange it more nicely once I get it working), and it seems to be working. However, when I attempt to display the results the program crashes with a debug error. In the command window it says "OpenCV Error: Assertion failed (scn == 1 && (dcn == 3 || dcn == 4)) in unknown function in file ........\opencv\modules\imgproc\src\color.cpp, line 2453".

Commenting out various parts of the code that display the results just leads to different OpenCV errors. Here's my code. If anyone can help, I will love you forever.

#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"

#include <iostream>

using namespace cv;
using namespace std;

void help(char** argv)
{
    cout << "\nThis program demonstrates keypoint finding and matching between 2 images using features2d framework.\n"
     << "Example of usage:\n"
     << argv[0] << " [detectorType] [descriptorType] [image1] [image2] [ransacReprojThreshold]\n"
     << "\n"
     << "Matches are filtered using homography matrix if ransacReprojThreshold>=0\n"
     << "Example:\n"
     << "./descriptor_extractor_matcher SURF SURF  cola1.jpg cola2.jpg 3\n"
     << "\n"
     << "Possible detectorType values: see in documentation on createFeatureDetector().\n"
     << "Possible descriptorType values: see in documentation on createDescriptorExtractor().\n" << endl;
}

const string winName = "rectified";

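// Symmetric ("cross-check") matching: keep a match only if it is a nearest
// neighbour in both directions, descriptors1 -> descriptors2 and back again.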
void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
                         const Mat& descriptors1, const Mat& descriptors2,
                         vector<DMatch>& filteredMatches12, int knn=1 )
{
    filteredMatches12.clear();
    vector<vector<DMatch> > matches12, matches21;
    descriptorMatcher->knnMatch( descriptors1, descriptors2, matches12, knn );
    descriptorMatcher->knnMatch( descriptors2, descriptors1, matches21, knn );
    for( size_t m = 0; m < matches12.size(); m++ )
    {
        bool findCrossCheck = false;
        for( size_t fk = 0; fk < matches12[m].size(); fk++ )
        {
            DMatch forward = matches12[m][fk];

            for( size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++ )
            {
                DMatch backward = matches21[forward.trainIdx][bk];
                if( backward.trainIdx == forward.queryIdx )
                {
                    filteredMatches12.push_back(forward);
                    findCrossCheck = true;
                    break;
                }
            }
            if( findCrossCheck ) break;
        }
    }
}

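// Detects and describes keypoints in the right image, matches them against the
// precomputed left-image descriptors, estimates a homography and the fundamental
// matrix, rectifies both images and shows them side by side.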
void doIteration( const Mat& leftImg, Mat& rightImg,
                  vector<KeyPoint>& keypoints1, const Mat& descriptors1,
                  Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
                  Ptr<DescriptorMatcher>& descriptorMatcher,
                  double ransacReprojThreshold )
{
    assert( !leftImg.empty() );
    Mat H12;
    assert( !rightImg.empty()/* && rightImg.cols==leftImg.cols && rightImg.rows==leftImg.rows*/ );

    cout << endl << "< Extracting keypoints from second image..." << endl;
    vector<KeyPoint> keypoints2;
    detector->detect( rightImg, keypoints2 );
    cout << keypoints2.size() << " points" << endl << ">" << endl;

    cout << "< Computing descriptors for keypoints from second image..." << endl;
    Mat descriptors2;
    descriptorExtractor->compute( rightImg, keypoints2, descriptors2 );
    cout << ">" << endl;

    cout << "< Matching descriptors..." << endl;
    vector<DMatch> filteredMatches;
    crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );
    cout << ">" << endl;

    vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
    for( size_t i = 0; i < filteredMatches.size(); i++ )
    {
        queryIdxs[i] = filteredMatches[i].queryIdx;
        trainIdxs[i] = filteredMatches[i].trainIdx;
    }

    cout << "< Computing homography (RANSAC)..." << endl;
    vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
    H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold );
    cout << ">" << endl;

    //Mat drawImg;
    if( !H12.empty() ) // filter outliers
    {
        vector<char> matchesMask( filteredMatches.size(), 0 );
        vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
        vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
        Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
        for( size_t i1 = 0; i1 < points1.size(); i1++ )
        {
            if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) < 4 ) // inlier
                matchesMask[i1] = 1;
        }
        /* draw inliers
        drawMatches( leftImg, keypoints1, rightImg, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask, 2 ); */
    }

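    // Uncalibrated rectification: estimate the fundamental matrix from all
    // matched points and compute a rectifying homography for each image.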
    Size imageSize = leftImg.size();
    Mat F = findFundamentalMat(Mat(points1), Mat(points2), FM_8POINT, 0, 0);
    Mat H1, H2;
    stereoRectifyUncalibrated(Mat(points1), Mat(points2), F, imageSize, H1, H2, 3);

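    // The camera intrinsics are unknown, so identity camera matrices and empty
    // (zero) distortion coefficients are used; R = K^-1 * H * K then collapses
    // to the rectifying homography itself.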
    Mat cameraMatrix[2], distCoeffs[2], R1, R2, P1, P2, rmap[2][2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
    R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
    P1 = cameraMatrix[0];
    P2 = cameraMatrix[1];

    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    Mat canvas, img;
    double sf;
    int i, j, w, h;

    sf = 600./MAX(imageSize.width, imageSize.height);
    w = cvRound(imageSize.width*sf);
    h = cvRound(imageSize.height*sf);
    canvas.create(h, w*2, CV_8UC3);

    for (i = 0; i < 2; i++)
    {
        if (i == 0)
            img = leftImg;
        else
            img = rightImg;

        Mat rimg, cimg;
        remap(img, rimg, rmap[i][0], rmap[i][1], CV_INTER_LINEAR);
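        // This is the call that triggers "Assertion failed (scn == 1 ...)":
        // CV_GRAY2BGR expects a single-channel source, but imread() loaded
        // 3-channel BGR images by default.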
        cvtColor(rimg, cimg, CV_GRAY2BGR);
        Mat canvasPart = canvas(Rect(w*i, 0, w, h));
        resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
    }

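    // Draw a horizontal line every 16 rows so the quality of the rectification
    // (corresponding features ending up on the same row) can be checked visually.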
    for( j = 0; j < canvas.rows; j += 16 )
    {
        line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
    }

    imshow(winName, canvas);
}


int main(int argc, char** argv)
{
    if( argc != 6 )
    {
        help(argv);
        return -1;
    }
    double ransacReprojThreshold = atof(argv[5]);


    cout << "< Creating detector, descriptor extractor and descriptor matcher ..." << endl;
    Ptr<FeatureDetector> detector = FeatureDetector::create( argv[1] );
    Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( argv[2] );
    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create("FlannBased");
    cout << ">" << endl;
    if( detector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty()  )
    {
        cout << "Can not create detector or descriptor extractor or descriptor matcher of given types" << endl;
        return -1;
    }

    cout << "< Reading the images..." << endl;
    Mat leftImg = imread( argv[3] );
    Mat rightImg = imread( argv[4] );
    cout << ">" << endl;
    if( leftImg.empty() || ( rightImg.empty()) )
    {
        cout << "Can not read images" << endl;
        return -1;
    }

    cout << endl << "< Extracting keypoints from first image..." << endl;
    vector<KeyPoint> keypoints1;
    detector->detect( leftImg, keypoints1 );
    cout << keypoints1.size() << " points" << endl << ">" << endl;

    cout << "< Computing descriptors for keypoints from first image..." << endl;
    Mat descriptors1;
    descriptorExtractor->compute( leftImg, keypoints1, descriptors1 );
    cout << ">" << endl;

    namedWindow(winName, CV_WINDOW_NORMAL);
    doIteration( leftImg, rightImg, keypoints1, descriptors1,
                 detector, descriptorExtractor, descriptorMatcher,
                 ransacReprojThreshold );
    for(;;)
    {
        char c = (char)waitKey(0);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting ..." << endl;
            return 0;
        }
    }
    waitKey(0);
    return 0;
}

The main focus should probably be around the doIteration method, but I've included the rest of it so you can see exactly what is going on.

Answer

Maybe that's too late ;) I didn't look through your code, but it seems to me you forgot to convert the image to grayscale.
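To expand on that a little: the assertion is raised by cvtColor(rimg, cimg, CV_GRAY2BGR) in doIteration. CV_GRAY2BGR expects a single-channel source (that is what scn == 1 means), but imread() loads 3-channel BGR images by default. One minimal way to fix it, sketched under the assumption that grayscale input is acceptable for the feature detector as well, is to load both images as grayscale in main():

// Load both images as single-channel grayscale so that the later
// CV_GRAY2BGR conversion in doIteration sees the expected 1-channel input.
Mat leftImg = imread( argv[3], CV_LOAD_IMAGE_GRAYSCALE );
Mat rightImg = imread( argv[4], CV_LOAD_IMAGE_GRAYSCALE );

Alternatively, the cvtColor call could be skipped (or guarded with rimg.channels() == 1) when the remapped image is already BGR; the conversion is only there so a 3-channel image can be copied onto the colour canvas and have green scan lines drawn over it.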
