How to detect marked black regions inside largest Rectangle Contour?

Problem Description

I can detect the largest contour on the answer sheet (20 questions, each with 4 alternatives).

After drawing the largest contour, what should I do? Divide the rectangle into a 20x4 grid of cells? Or find the contours again, but this time inside the rectangle? I don't know exactly what I need; I just want to get which alternative is marked.

I looked at this document.

How do I code "image gridding and division"?

 public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {

        return findLargestRectangle(inputFrame.rgba());
    }


    private Mat findLargestRectangle(Mat original_image) {
        Mat imgSource = original_image;
        hierarchy = new Mat();

        //convert the image to black and white
        Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);

        //convert the image to black and white does (8 bit)
        Imgproc.Canny(imgSource, imgSource, 50, 50);

        //apply gaussian blur to smoothen lines of dots
        Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);

        //find the contours
        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Imgproc.findContours(imgSource, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

        hierarchy.release();

        double maxArea = -1;
        int maxAreaIdx = -1;
        MatOfPoint temp_contour = contours.get(0); //the largest is at the index 0 for starting point
        MatOfPoint2f approxCurve = new MatOfPoint2f();
        Mat largest_contour = contours.get(0);
        List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
        for (int idx = 0; idx < contours.size(); idx++) {
            temp_contour = contours.get(idx);
            double contourarea = Imgproc.contourArea(temp_contour);
            //compare this contour to the previous largest contour found
            if (contourarea > maxArea) {
                //check if this contour is a square
                MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
                int contourSize = (int)temp_contour.total();
                Imgproc.approxPolyDP(new_mat, approxCurve, contourSize*0.05, true);
                if (approxCurve.total() == 4) {
                    maxArea = contourarea;
                    maxAreaIdx = idx;
                    largest_contours.add(temp_contour);
                    largest_contour = temp_contour;
                }
            }
        }
        MatOfPoint temp_largest = largest_contours.get(largest_contours.size()-1);
        largest_contours = new ArrayList<MatOfPoint>();
        largest_contours.add(temp_largest);


        Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
        Imgproc.drawContours(imgSource, contours, maxAreaIdx, new Scalar(0, 255, 0), 1);
        Log.d(TAG, "Largest Contour: " + contours.get(maxAreaIdx).toString());


        return imgSource;
    }
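
A rough sketch (not part of the original question) of the "divide into a 20x4 grid" idea: assuming sheet is a perspective-corrected, binarized CV_8UC1 Mat of the answer area in which filled bubbles are white, each cell of the 20x4 grid can be tested by counting its non-zero pixels. The name sheet and the 20% fill threshold are assumptions for illustration only.

    // sheet: CV_8UC1 Mat of the warped answer area, marks are white (255)
    int questions = 20, alternatives = 4;
    int cellW = sheet.cols() / questions;
    int cellH = sheet.rows() / alternatives;
    for (int q = 0; q < questions; q++) {
        for (int a = 0; a < alternatives; a++) {
            // sub-rectangle for question q, alternative a
            Mat cell = sheet.submat(a * cellH, (a + 1) * cellH,
                                    q * cellW, (q + 1) * cellW);
            int marked = Core.countNonZero(cell);
            // consider the bubble filled if more than ~20% of the cell is white
            if (marked > 0.2 * cell.total()) {
                Log.d("OMR", "Question " + (q + 1) + ": " + (char) ('A' + a));
            }
        }
    }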

UPDATE 1:

I want to thank @sturkmen for his answer. I can read and find the black regions now. Here is the Android code:

   public View onCreateView(LayoutInflater inflater, ViewGroup container,
                         Bundle savedInstanceState) {
    View _view = inflater.inflate(R.layout.fragment_main, container, false);
    // Inflate the layout for this fragment


    Button btnTest = (Button) _view.findViewById(R.id.btnTest);
    btnTest.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {

            Mat img = Imgcodecs.imread(mediaStorageDir().getPath() + "/" + "test2.jpg");
            if (img.empty()) {
                Log.d("Fragment", "IMG EMPTY");
            }


            Mat gray = new Mat();
            Mat thresh = new Mat();

            //convert the image to black and white
            Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);

            //convert the image to black and white does (8 bit)
            Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
            Mat temp = thresh.clone();
            //find the contours
            Mat hierarchy = new Mat();

            Mat corners = new Mat(4,1, CvType.CV_32FC2);
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(temp, contours,hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            hierarchy.release();

            for (int idx = 0; idx < contours.size(); idx++)
            {
                MatOfPoint contour = contours.get(idx);
                MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
                RotatedRect minRect = Imgproc.minAreaRect( contour_points );
                Point[] rect_points = new Point[4];
                minRect.points( rect_points );
                if(minRect.size.height > img.width() / 2)
                {
                    List<Point> srcPoints = new ArrayList<Point>(4);
                    srcPoints.add(rect_points[2]);
                    srcPoints.add(rect_points[3]);
                    srcPoints.add(rect_points[0]);
                    srcPoints.add(rect_points[1]);

                    corners = Converters.vector_Point_to_Mat(
                            srcPoints, CvType.CV_32F);
                }

            }
            Imgproc.erode(thresh, thresh, new Mat(), new Point(-1,-1), 10);
            Imgproc.dilate(thresh, thresh, new Mat(), new Point(-1,-1), 5);
            Mat results = new Mat(1000,250,CvType.CV_8UC3);
            Mat quad = new Mat(1000,250,CvType.CV_8UC1);

            List<Point> dstPoints = new ArrayList<Point>(4);
            dstPoints.add(new Point(0, 0));
            dstPoints.add(new Point(1000, 0));
            dstPoints.add(new Point(1000, 250));
            dstPoints.add(new Point(0, 250));
            Mat quad_pts = Converters.vector_Point_to_Mat(
                    dstPoints, CvType.CV_32F);

            Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
            Imgproc.warpPerspective( img, results, transmtx, new Size(1000,250));
            Imgproc.warpPerspective( thresh, quad, transmtx, new Size(1000,250));

            Imgproc.resize(quad,quad,new Size(20,5));

            Imgcodecs.imwrite("results.png",quad);

            //show image
            showImage(quad);

            //store image
            storeImage(quad);

        }

    });

    return _view;
}

public void showImage (Mat img) {
    ImageView imgView = (ImageView) getActivity().findViewById(R.id.sampleImageView);
    //Mat mRgba = new Mat();

    //mRgba = Utils.loadResource(MainAct.this, R.drawable.your_image,Highgui.CV_LOAD_IMAGE_COLOR);
    Bitmap img2 = Bitmap.createBitmap(img.cols(), img.rows(),Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(img, img2);
    imgView.setImageBitmap(img2);
}

public File mediaStorageDir () {
    File _mediaStorageDir = new File(Environment.getExternalStorageDirectory()
            + "/Android/data/"
            + getActivity().getApplicationContext().getPackageName());

    return _mediaStorageDir;
}

public void storeImage(Mat matImg) {

    Bitmap bitmapImg = Bitmap.createBitmap(matImg.cols(), matImg.rows(),Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matImg, bitmapImg);
    String timeStamp = new SimpleDateFormat("ddMMyyyy_HHmm").format(new Date());
    File mediaFile;
    String mImageName="IMG_"+ timeStamp +".jpg";
    mediaFile = new File(mediaStorageDir().getPath() + File.separator + mImageName);

    File pictureFile = mediaFile;

    try {
        FileOutputStream fos = new FileOutputStream(pictureFile);
        bitmapImg.compress(Bitmap.CompressFormat.PNG, 90, fos);
        fos.close();
    } catch (FileNotFoundException e) {
        Log.d("FragmentMain", "File not found: " + e.getMessage());
    } catch (IOException e) {
        Log.d("FragmentMain", "Error accessing file: " + e.getMessage());
    }
}

Solution

Here is my trial code as a sample.

I hope it will be helpful. (I will add some explanation about the code later.)

Test Image (I edited your image; it has an empty answer and an invalid double mark):

http://answers.opencv.org/upfiles/14639435664447751.jpg

Result Image:

http://answers.opencv.org/upfiles/14639682486508377.jpeg

#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

using namespace cv;
using namespace std;

int main( int argc, const char** argv )
{
    Mat img = imread(argv[1]);
    if(img.empty())
    {
        return -1;
    }

    Size dims(20,5); // this variable should be changed according input
    Mat gray,thresh;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);

    Mat quad(img.size(), CV_8UC1); // should be improved
    Mat results(img.size(), CV_8UC3);

    vector<Point2f> quad_pts;
    quad_pts.push_back(cv::Point2f(0, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
    quad_pts.push_back(cv::Point2f(0, quad.rows));

    vector<Point2f> corners;
    vector<vector<Point> > contours;

    findContours(thresh.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    for( size_t i = 0; i< contours.size(); i++ )
    {
        RotatedRect minRect = minAreaRect( Mat(contours[i]) );

        // rotated rectangle
        Point2f rect_points[4];
        minRect.points( rect_points );

        if(Rect(minRect.boundingRect()).width > img.cols / 2) // should be improved
            for( int j = 0; j < 4; j++ )
            {
              Point2f pt = quad_pts[j];
              Point2f nearest_pt = rect_points[0];
              float dist = norm( pt - nearest_pt );
                for( int k = 1; k < 4; k++ )
                {
                 if( norm( pt - rect_points[k] ) < dist )
                 {
                   dist = norm( pt - rect_points[k] );
                   nearest_pt = rect_points[k];
                 }
                }
                corners.push_back( nearest_pt );
            }
    }

    erode(thresh,thresh,Mat(),Point(-1,-1), 10); // should be improved
    dilate(thresh,thresh,Mat(),Point(-1,-1), 5); // should be improved

    Mat transmtx = getPerspectiveTransform(corners, quad_pts);
    warpPerspective( img, results, transmtx, img.size()); // Create a Mat To Show results
    warpPerspective( thresh, quad, transmtx, img.size());

    resize(quad,quad,dims);

    for(int i = 0; i < quad.cols; i++)
    {
        String answer = "";

        answer += quad.at<uchar>(1,i) == 0 ? "" : "A";
        answer += quad.at<uchar>(2,i) == 0 ? "" : "B";
        answer += quad.at<uchar>(3,i) == 0 ? "" : "C";
        answer += quad.at<uchar>(4,i) == 0 ? "" : "D";

        if( answer.length()  > 1 ) answer = "X"; // Double mark
        int y = 0;
        if( answer == "A" ) y = results.rows / dims.height;
        if( answer == "B" ) y = results.rows / dims.height *2;
        if( answer == "C" ) y = results.rows / dims.height *3;
        if( answer == "D" ) y = results.rows / dims.height *4;
        if( answer == "" ) answer = "[-]";
        putText( results, answer, Point( 50* i + 15, 30 + y), FONT_HERSHEY_PLAIN, 2, Scalar(0,0,255),2);

    }
    imshow( "results", results );
    waitKey(0);

    return 0;
}
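
Briefly, what the code above does: it thresholds the sheet (Otsu, inverted so marks become white), finds the contour whose rotated rectangle is wider than half the image (the answer box) and picks its four corners, warps the box to an upright rectangle, erodes and dilates so that only the filled bubbles survive, and then resizes the warped mask down to 20x5 pixels so that each question/alternative cell collapses to a single pixel. A non-zero pixel in rows 1-4 of a column is read as answer A-D for that question, more than one as a double mark (X), and none as blank ([-]). To try it, build against OpenCV and pass the answer-sheet image as the first command-line argument.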

As a challenge to myself, I tried to implement the main part in Java (newcomer copy-paste code):

Mat img = Imgcodecs.imread("test.jpg");
Mat gray = new Mat();
Mat thresh = new Mat();

//convert the image to black and white
Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);

//convert the image to black and white does (8 bit)
Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
Mat temp = thresh.clone();
//find the contours
Mat hierarchy = new Mat();

Mat corners = new Mat(4,1,CvType.CV_32FC2);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(temp, contours,hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
hierarchy.release();

for (int idx = 0; idx < contours.size(); idx++)
{
    MatOfPoint contour = contours.get(idx);
    MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
    RotatedRect minRect = Imgproc.minAreaRect( contour_points );
    Point[] rect_points = new Point[4];
    minRect.points( rect_points );
    if(minRect.size.height > img.width() / 2)
    {
        List<Point> srcPoints = new ArrayList<Point>(4);
        srcPoints.add(rect_points[2]);
        srcPoints.add(rect_points[3]);
        srcPoints.add(rect_points[0]);
        srcPoints.add(rect_points[1]);

        corners = Converters.vector_Point_to_Mat(
                      srcPoints, CvType.CV_32F);
    }

}
Imgproc.erode(thresh, thresh, new Mat(), new Point(-1,-1), 10);
Imgproc.dilate(thresh, thresh, new Mat(), new Point(-1,-1), 5);
Mat results = new Mat(1000,250,CvType.CV_8UC3);
Mat quad = new Mat(1000,250,CvType.CV_8UC1);

List<Point> dstPoints = new ArrayList<Point>(4);
dstPoints.add(new Point(0, 0));
dstPoints.add(new Point(1000, 0));
dstPoints.add(new Point(1000, 250));
dstPoints.add(new Point(0, 250));
Mat quad_pts = Converters.vector_Point_to_Mat(
                   dstPoints, CvType.CV_32F);

Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
Imgproc.warpPerspective( img, results, transmtx, new Size(1000,250));
Imgproc.warpPerspective( thresh, quad, transmtx, new Size(1000,250));

Imgproc.resize(quad,quad,new Size(20,5));

Imgcodecs.imwrite("results.png",quad);

Here is the (20x5 px) result image: http://answers.opencv.org/upfiles/14639684166300772.png
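
The Java port above stops after writing the 20x5 image; the answer-reading loop from the C++ version can be carried over in the same way. A minimal sketch (my addition, not from the original answer), assuming quad is the CV_8UC1 Mat left by the Imgproc.resize(quad, quad, new Size(20, 5)) call above:

    StringBuilder sheet = new StringBuilder();
    for (int col = 0; col < quad.cols(); col++) {
        String answer = "";
        // rows 1..4 hold alternatives A..D; a non-zero pixel means the filled bubble survived erode/dilate
        if (quad.get(1, col)[0] != 0) answer += "A";
        if (quad.get(2, col)[0] != 0) answer += "B";
        if (quad.get(3, col)[0] != 0) answer += "C";
        if (quad.get(4, col)[0] != 0) answer += "D";
        if (answer.length() > 1) answer = "X";    // double mark
        if (answer.isEmpty())     answer = "[-]"; // left blank
        sheet.append(col + 1).append(": ").append(answer).append('\n');
    }
    Log.d("OMR", sheet.toString());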
