How to detect rectangle from HoughLines transform in OpenCV Java

Problem description

I know this is a duplicated post, but I'm still stuck on the implementation. I followed some guides on the internet about how to detect a document in an image with OpenCV and Java. The first approach I came up with was to use findContours after some pre-processing (blur, edge detection); after getting all the contours I can find the largest one and assume it is the rectangle I'm looking for, but it fails in some cases, e.g. when the document is not fully captured, such as a missing corner. After several attempts and some new processing steps that did not work at all, I found that the HoughLines transform makes it easier. Now I have all the lines inside the image, but I still don't know what to do next to define the rectangle of interest that I want. Here is the implementation code I have so far:

Approach 1: Using findContours

    Mat grayImage = new Mat();
    Mat detectedEdges = new Mat();
    // convert to grayscale
    Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
    // reduce noise with a 3x3 kernel
    // Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
    Imgproc.medianBlur(grayImage, detectedEdges, 9);
    // Imgproc.equalizeHist(detectedEdges, detectedEdges);
    // Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
    Mat edges = new Mat();
    // canny detector, with ratio of lower:upper threshold of 3:1
    Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
    // makes the object in white bigger
    Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
    Image imageToShow = Utils.mat2Image(edges);
    updateImageView(cannyFrame, imageToShow);
    /// Find contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // loop over the contours
    MatOfPoint2f approxCurve;
    double maxArea = 0;
    int maxId = -1;
    for (MatOfPoint contour : contours) {
        MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
        double area = Imgproc.contourArea(contour);
        approxCurve = new MatOfPoint2f();
        Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
        if (approxCurve.total() == 4 && area >= maxArea) {
            double maxCosine = 0;
            List<Point> curves = approxCurve.toList();
            for (int j = 2; j < 5; j++) {
                double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
                maxCosine = Math.max(maxCosine, cosine);
            }
            if (maxCosine < 0.3) {
                maxArea = area;
                maxId = contours.indexOf(contour);
            }
        }
    }
    MatOfPoint maxMatOfPoint = contours.get(maxId);
    MatOfPoint2f maxMatOfPoint2f = new MatOfPoint2f(maxMatOfPoint.toArray());
    RotatedRect rect = Imgproc.minAreaRect(maxMatOfPoint2f);
    System.out.println("Rect angle: " + rect.angle);
    Point points[] = new Point[4];
    rect.points(points);
    for (int i = 0; i < 4; ++i) {
        Imgproc.line(frame, points[i], points[(i + 1) % 4], new Scalar(255, 255, 25), 3);
    }

    Mat dest = new Mat();
    frame.copyTo(dest, frame);
    return dest;

Approach 2: Using HoughLines transform

    // STEP 1: Edge detection
    Mat grayImage = new Mat();
    Mat detectedEdges = new Mat();
    Vector<Point> start = new Vector<Point>();
    Vector<Point> end = new Vector<Point>();
    // convert to grayscale
    Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
    // reduce noise with a 3x3 kernel
    // Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
    Imgproc.medianBlur(grayImage, detectedEdges, 9);
    // Imgproc.equalizeHist(detectedEdges, detectedEdges);
    // Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
    // AdaptiveThreshold -> classify as either black or white
    // Imgproc.adaptiveThreshold(detectedEdges, detectedEdges, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 5, 2);
    // Imgproc.Sobel(detectedEdges, detectedEdges, -1, 1, 0);
    Mat edges = new Mat();
    // canny detector, with ratio of lower:upper threshold of 3:1
    Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
    // apply gaussian blur to smoothen lines of dots
    Imgproc.GaussianBlur(edges, edges, new org.opencv.core.Size(5, 5), 5);
    // makes the object in white bigger
    Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
    Image imageToShow = Utils.mat2Image(edges);
    updateImageView(cannyFrame, imageToShow);
    // STEP 2: Line detection
    // Do Hough line
    Mat lines = new Mat();
    int minLineSize = 50;
    int lineGap = 10;
    Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 720, (int) this.threshold.getValue(), this.minLineSize.getValue(), lineGap);
    System.out.println("MinLineSize: " + this.minLineSize.getValue());
    System.out.println(lines.rows());
    for (int i = 0; i < lines.rows(); i++) {
        double[] val = lines.get(i, 0);
        Point tmpStartP = new Point(val[0], val[1]);
        Point tmpEndP = new Point(val[2], val[3]);
        start.add(tmpStartP);
        end.add(tmpEndP);
        Imgproc.line(frame, tmpStartP, tmpEndP, new Scalar(255, 255, 0), 2);
    }

    Mat dest = new Mat();
    frame.copyTo(dest, frame);
    return dest;

[Image: HoughLine result 1]  [Image: HoughLine result 2]

How can I detect the needed rectangle from the HoughLines result? Can someone give me the next step to complete the HoughLines transform approach? Any help is appreciated; I've been stuck on this for a while.

Thank you for reading this.

Solution

This answer is pretty much a mix of two other answers (here and here) I posted. But the pipeline I used for the other answers can be a little bit improved for your case. So I think it's worth posting a new answer.

There are many ways to achieve what you want. However, I don't think that line detection with HoughLinesP is needed here. So here is the pipeline I used on your samples:

Step 1: Detect edges

  • Resize the input image if it's too large (I noticed that this pipeline works better on a down-scaled version of the input image)
  • Blur the grayscale input and detect edges with the Canny filter (see the Java sketch after this list)
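
A rough Java sketch of this step (untested; frame stands for the input Mat, as in the question's code, and the 0.1 resize factor and 30/100 Canny thresholds are the values used in the C++ code further down):

    // Step 1 (Java sketch): down-scale, convert to grayscale, blur, detect edges
    Mat small = new Mat();
    Imgproc.resize(frame, small, new Size(), 0.1, 0.1, Imgproc.INTER_AREA);
    Mat gray = new Mat();
    Imgproc.cvtColor(small, gray, Imgproc.COLOR_BGR2GRAY);
    Imgproc.blur(gray, gray, new Size(3, 3));
    Mat edges = new Mat();
    Imgproc.Canny(gray, edges, 30, 100);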

Step 2: Find the card's corners

  • Compute the contours
  • Sort the contours by length and only keep the largest one
  • Generate the convex hull of this contour
  • Use approxPolyDP to simplify the convex hull (this should give a quadrilateral)
  • Create a mask out of the approximate polygon
  • Return the 4 points of the quadrilateral (a rough Java sketch of this step follows the list)
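
In Java the same steps could look roughly like this (untested sketch; edges is the Canny output from Step 1, and note that, unlike the C++ API, Imgproc.convexHull in the Java bindings returns indices into the contour rather than points, so the hull points have to be collected by hand):

    // Step 2 (Java sketch): contours -> largest contour -> convex hull -> approxPolyDP
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
    // keep the contour with the most points (same criterion as the sort in the C++ code)
    MatOfPoint largest = contours.get(0);
    for (MatOfPoint c : contours) {
        if (c.total() > largest.total()) {
            largest = c;
        }
    }
    // convexHull returns indices in Java; map them back to points
    MatOfInt hullIndices = new MatOfInt();
    Imgproc.convexHull(largest, hullIndices);
    Point[] contourPoints = largest.toArray();
    int[] indices = hullIndices.toArray();
    Point[] hullPoints = new Point[indices.length];
    for (int i = 0; i < indices.length; i++) {
        hullPoints[i] = contourPoints[indices[i]];
    }
    // simplify the hull; with a suitable epsilon this should leave the 4 corners
    MatOfPoint2f approx = new MatOfPoint2f();
    Imgproc.approxPolyDP(new MatOfPoint2f(hullPoints), approx, 20, true);
    Point[] corners = approx.toArray(); // expect 4 points when the sheet is found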

Step 3: Homography

  • Use findHomography to find the perspective transformation of your paper sheet (with the 4 corner points found at Step 2)
  • Warp the input image using the computed homography matrix (see the Java sketch after this list)
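
A corresponding Java sketch (untested; corners are the 4 points from the previous step, small is the down-scaled input image, and findHomography lives in org.opencv.calib3d.Calib3d in the Java bindings):

    // Step 3 (Java sketch): homography from the detected corners to the output rectangle
    Mat warpedCard = new Mat(400, 300, CvType.CV_8UC3);
    MatOfPoint2f srcCorners = new MatOfPoint2f(corners); // must match the destination order below
    MatOfPoint2f dstCorners = new MatOfPoint2f(
            new Point(warpedCard.cols(), warpedCard.rows()),
            new Point(0, warpedCard.rows()),
            new Point(0, 0),
            new Point(warpedCard.cols(), 0));
    Mat homography = Calib3d.findHomography(srcCorners, dstCorners);
    Imgproc.warpPerspective(small, warpedCard, homography, warpedCard.size());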

NOTE: Of course, once you have found the corners of the paper sheet on the down-scaled version of the input image, you can easily compute the position of the corners on the full-sized input image, in order to get the best resolution for the warped paper sheet.
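
For example, if the corners were found on an image that was down-scaled by a factor of 0.1 (as in the code below), mapping them back to the original resolution is just a division by that factor (sketch with hypothetical variable names):

    // map corners found on the down-scaled image back to the full-sized image
    double scale = 0.1; // the factor used when down-scaling
    Point[] fullSizeCorners = new Point[corners.length];
    for (int i = 0; i < corners.length; i++) {
        fullSizeCorners[i] = new Point(corners[i].x / scale, corners[i].y / scale);
    }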

And here is the code:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <numeric>
#include <vector>

using namespace cv;
using namespace std;

vector<Point> getQuadrilateral(Mat & grayscale, Mat& output)
{
    Mat approxPoly_mask(grayscale.rows, grayscale.cols, CV_8UC1);
    approxPoly_mask = Scalar(0);

    vector<vector<Point>> contours;
    findContours(grayscale, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);

    vector<int> indices(contours.size());
    iota(indices.begin(), indices.end(), 0);

    sort(indices.begin(), indices.end(), [&contours](int lhs, int rhs) {
        return contours[lhs].size() > contours[rhs].size();
    });

    /// Find the convex hull object for each contour
    vector<vector<Point> >hull(1);
    convexHull(Mat(contours[indices[0]]), hull[0], false);

    vector<vector<Point>> polygon(1);
    approxPolyDP(hull[0], polygon[0], 20, true);
    drawContours(approxPoly_mask, polygon, 0, Scalar(255));
    imshow("approxPoly_mask", approxPoly_mask);

    if (polygon[0].size() >= 4) // we found the 4 corners
    {
        return(polygon[0]);
    }

    return(vector<Point>());
}


int main(int argc, char** argv)
{

    Mat input = imread("papersheet1.JPG");
    resize(input, input, Size(), 0.1, 0.1);
    Mat input_grey;
    cvtColor(input, input_grey, COLOR_BGR2GRAY);
    Mat threshold1;
    Mat edges;
    blur(input_grey, input_grey, Size(3, 3));
    Canny(input_grey, edges, 30, 100);


    vector<Point> card_corners = getQuadrilateral(edges, input);
    Mat warpedCard(400, 300, CV_8UC3);
    if (card_corners.size() == 4)
    {
        Mat homography = findHomography(card_corners, vector<Point>{Point(warpedCard.cols, warpedCard.rows), Point(0, warpedCard.rows), Point(0, 0), Point(warpedCard.cols, 0)});
        warpPerspective(input, warpedCard, homography, Size(warpedCard.cols, warpedCard.rows));
    }

    imshow("warped card", warpedCard);
    imshow("edges", edges);
    imshow("input", input);
    waitKey(0);

    return 0;
}

This is C++ code, but it shouldn't be too hard to translate into Java.
