openCV 2.4.10 bwlabel - connected components

Problem description

Here is the original code from matlab:

 % Calculate each separated object area
    cDist=regionprops(bwImg, 'Area');
    cDist=[cDist.Area];

    % Label each object
    [bwImgLabeled, ~]=bwlabel(bwImg);

    % Calculate min and max object size based on assumptions on the color
    % checker size
    maxLabelSize = prod(size(imageData)./[4 6]);
    minLabelSize = prod(size(imageData)./[4 6]./10);

    % Find label indices for objects that are too large or too small
    remInd = find(cDist > maxLabelSize);
    remInd = [remInd find(cDist < minLabelSize)];

    % Remove over/undersized objects
    for n=1:length(remInd)
        ri = bwImgLabeled == remInd(n);
        bwImgLabeled(ri) = 0;
    end

Here is my code using openCV

//regionprops(bwImg, 'Area');
// cDist=[cDist.Area]
//cv::FileStorage file("C:\\Users\\gdarmon\\Desktop\\gili.txt", cv::FileStorage::WRITE);
//
//file << dst;
dst.convertTo(dst,CV_8U);
cv::vector<cv::vector<cv::Point> > contours;
cv::vector<cv::Vec4i> hierarchy;
cv::findContours(dst,contours,hierarchy,CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);

std::vector<cv::Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
    mu[i] = cv::moments(contours[i],false);
}
vector<cv::Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ 
    mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); 
}

Since I now have the contours, I would like to use a bwlabel-like function.
1. I have figured that labeling is done in order to get 4- or 8-connected objects. Can you please explain what labeling actually is? I would appreciate any link.
2. Connected components in OpenCV: in this article some people are talking about cvBlob and some about OpenCV's cvContourArea. Can you explain the difference, and which will be better suited for my use case?

Update: here is what I have tried using cvBlobs

IplImage* img_bw = new IplImage(dst);
CBlobResult blobs;
CBlob *currentBlob;
blobs = CBlobResult(img_bw, NULL, 0);
// Exclude all white blobs smaller than the given value (80)
// The bigger the last parameter, the bigger the blobs need
// to be for inclusion 
blobs.Filter( blobs,
    B_EXCLUDE,
    CBlobGetArea(),
    B_LESS,
    80 );

// Get the number of blobs discovered
int num_blobs = blobs.GetNumBlobs(); 

// Display the filtered blobs
IplImage* filtered = cvCreateImage( cvGetSize( img_bw ),
    IPL_DEPTH_8U,
    3 ); 

cvMerge( img_bw, img_bw, img_bw, NULL, filtered );

for ( int i = 0; i < num_blobs; i++ )
{
    currentBlob = blobs.GetBlob( i );
    currentBlob->FillBlob( filtered, CV_RGB(255,0,0));
}

// Display the input / output windows and images
cvNamedWindow( "input" );
cvNamedWindow( "output" );
cvShowImage("input", img_bw );

cvShowImage("output", filtered);
cv::waitKey(0);

 /*% Calculate min and max object size based on assumptions on the color
% checker size
maxLabelSize = prod(size(imageData)./[4 6]);
minLabelSize = prod(size(imageData)./[4 6]./10);*/
double maxLabelSize = (dst.rows/4.0) * (dst.cols/6.0);
double minLabelSize = ((dst.rows/40.0) * (dst.cols/60.0));
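Presumably the same size limits could then be applied with another pair of Filter calls like the one above (untested sketch; I am assuming cvBlobs also defines a B_GREATER condition alongside B_LESS):

// Drop blobs whose area falls outside [minLabelSize, maxLabelSize]
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER, maxLabelSize);
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minLabelSize);

Is that the right way to reproduce the MATLAB removal loop, or is there a better option?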

Solution

  1. I have figured that labeling is done in order to get 4- or 8-connected objects. Can you please explain what labeling actually is? I would appreciate any link.

The clearest demonstration of what labeling actually does is in the Matlab documentation for bwlabel. If you compare the original matrix BW to the resulting matrix L, you'll see that it takes a binary image and assigns unique labels to each connected group of 1's:

L =

     1     1     1     0     0     0     0     0
     1     1     1     0     2     2     0     0
     1     1     1     0     2     2     0     0
     1     1     1     0     0     0     3     0
     1     1     1     0     0     0     3     0
     1     1     1     0     0     0     3     0
     1     1     1     0     0     3     3     0
     1     1     1     0     0     0     0     0

Here there are three components labeled. This example looks for 4-connected components; a pixel is considered to be connected to the current pixel if it is to the left, right, above or below it. 8-connected objects include the diagonals, which would result in labels 2 and 3 being merged for the matrix above since the lower-right corner of object 2 and the top of object 3 are diagonally connected. The connected component labeling algorithm is described on Wikipedia here.
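To make the 4- vs 8-connectivity difference concrete in OpenCV terms, the same 8x8 matrix can be fed to connectedComponents (introduced in part 2 below) with both settings. A minimal sketch, assuming OpenCV 3.0+ and the same includes and namespaces as the complete program at the end; BW, labels4, labels8, n4, and n8 are names introduced here:

uchar data[8][8] = {
    {1,1,1,0,0,0,0,0},
    {1,1,1,0,1,1,0,0},
    {1,1,1,0,1,1,0,0},
    {1,1,1,0,0,0,1,0},
    {1,1,1,0,0,0,1,0},
    {1,1,1,0,0,0,1,0},
    {1,1,1,0,0,1,1,0},
    {1,1,1,0,0,0,0,0}};
Mat BW(8, 8, CV_8UC1, data);                           // nonzero pixels are foreground

Mat labels4, labels8;
int n4 = connectedComponents(BW, labels4, 4, CV_32S);  // returns 4: background + 3 objects
int n8 = connectedComponents(BW, labels8, 8, CV_32S);  // returns 3: objects 2 and 3 merge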

2. Connected components in OpenCV: in this article some people are talking about cvBlob and some about OpenCV's cvContourArea. Can you explain the difference, and which will be better suited for my use case?

OpenCV 3.0 is out of beta and has two brand new methods: connectedComponents and connectedComponentsWithStats (documentation). If you're trying to replicate Matlab's bwlabel, this is the way to go.
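For instance, the labeling plus size filtering that the question does in MATLAB could be sketched directly on top of connectedComponentsWithStats. This is only a sketch: the [4 6] checker-size assumption comes from the question, and labelAndFilter is a helper name made up here, not an OpenCV function.

#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;

// Label a binary CV_8UC1 image and zero out components that are too large or too
// small, mirroring the MATLAB loop from the question.
Mat labelAndFilter(const Mat& bwImg)
{
    Mat labels, stats, centroids;
    int nLabels = connectedComponentsWithStats(bwImg, labels, stats, centroids, 8, CV_32S);

    double maxLabelSize = (bwImg.rows / 4.0) * (bwImg.cols / 6.0);
    double minLabelSize = (bwImg.rows / 40.0) * (bwImg.cols / 60.0);

    for (int label = 1; label < nLabels; label++)         // label 0 is the background
    {
        int area = stats.at<int>(label, CC_STAT_AREA);
        if (area > maxLabelSize || area < minLabelSize)
            labels.setTo(Scalar(0), labels == label);     // like bwImgLabeled(ri) = 0
    }
    return labels;                                        // CV_32S label image, like bwImgLabeled
}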

I wrote a test program to try out connectedComponentsWithStats (complete code below) using this as my test image:

(Actually, this image is reduced from 800x600 to 400x300, but the code to generate it is included below.)

I generated the labeled image using:

int nLabels = connectedComponentsWithStats(src, labels, stats, centroids, 8, CV_32S);

The value returned in nLabels is 5. Remember that this method considers the background to be label 0.

To see what the labeled areas are, you could scale up the grayscale values from [0..nLabels-1] to [0..255], or you could assign random RGB values and create a color image. For this test I just printed out the values at a couple of locations that I knew were in different components.

cout << "Show label values:" << endl;
// Middle of square at top-left
int component1Pixel = labels.at<int>(150,150);
cout << "pixel at(150,150) = " << component1Pixel << endl;
// Middle of rectangle at far right
int component2Pixel = labels.at<int>(300,550);
cout << "pixel at(300,550) = " << component2Pixel << endl << endl;

Show label values:
pixel at(150,150) = 1  
pixel at(300,550) = 2  
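For completeness, the two visualization options mentioned above could look roughly like this (a sketch reusing labels and nLabels from the complete program below; labelsGray, colors, and colorLabels are names introduced here, and rand() needs <cstdlib>):

// Grayscale stretch: map 0..nLabels-1 to 0..255 (assumes nLabels > 1)
Mat labelsGray;
labels.convertTo(labelsGray, CV_8U, 255.0 / (nLabels - 1));
imshow("Labels (stretched)", labelsGray);

// Random colors: one BGR color per label, background kept black
vector<Vec3b> colors(nLabels);
colors[0] = Vec3b(0, 0, 0);
for (int i = 1; i < nLabels; i++)
    colors[i] = Vec3b(rand() % 256, rand() % 256, rand() % 256);

Mat colorLabels(labels.size(), CV_8UC3);
for (int r = 0; r < labels.rows; r++)
    for (int c = 0; c < labels.cols; c++)
        colorLabels.at<Vec3b>(r, c) = colors[labels.at<int>(r, c)];
imshow("Labels (random colors)", colorLabels);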

stats is an nLabels x 5 Mat, one row per component (including the background), with columns left, top, width, height, and area. For this image:

stats:
(left,top,width,height,area)
[0, 0, 800, 600, 421697;
 100, 100, 101, 101, 10201;
 500, 150, 101, 301, 30401;
 350, 246, 10, 10, 36;
 225, 325, 151, 151, 17665]

You'll notice that component 0 is the full width/height of the image. Summing all of the areas, you get 480,000 = 800x600. The first 4 elements can be used to create a bounding rectangle:

Rect(Point(left,top), Size(width,height))
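For example, the stats rows can be turned into bounding boxes drawn on a color copy of the image. A sketch reusing src, stats, and nLabels from the program below; display is a name introduced here:

Mat display;
cvtColor(src, display, COLOR_GRAY2BGR);              // color copy so the boxes show up in red

for (int label = 1; label < nLabels; label++)        // skip the background (label 0)
{
    Rect box(stats.at<int>(label, CC_STAT_LEFT),
             stats.at<int>(label, CC_STAT_TOP),
             stats.at<int>(label, CC_STAT_WIDTH),
             stats.at<int>(label, CC_STAT_HEIGHT));
    rectangle(display, box, Scalar(0, 0, 255), 2);
}
imshow("Bounding boxes", display);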

centroids is an nLabels x 2 Mat containing the x, y coordinates of the centroid of each component (one row per component, including the background):

centroids:
(x, y)
[398.8575636060963, 298.8746232484461;
 150, 150;
 550, 300;
 354.5, 250.5;
 300, 400]
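The centroid coordinates are doubles, so they are read with at<double>. Here's a sketch that marks each foreground centroid on the display image from the previous sketch (again, display is not part of the original program):

for (int label = 1; label < nLabels; label++)         // skip the background centroid
{
    Point c(cvRound(centroids.at<double>(label, 0)),  // x
            cvRound(centroids.at<double>(label, 1))); // y
    circle(display, c, 4, Scalar(0, 255, 0), CV_FILLED);
}
imshow("Centroids", display);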

Finally, at some point you're probably going to want to do further processing on one of the components individually. Here I use compare to generate a new Mat only2 that contains only the pixels whose label is 2.

compare(labels, 2, only2, CMP_EQ);

compare helpfully sets these pixels to a value of 255 in the new image so you can see the results:
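Because only2 comes back as an ordinary 8-bit mask (255 inside the component, 0 elsewhere), the usual mask-based operations apply to it directly. A small sketch, reusing src and stats from the program below (area2 and component2 are names introduced here):

int area2 = countNonZero(only2);       // pixel count; should equal stats.at<int>(2, CC_STAT_AREA)

Mat component2;
src.copyTo(component2, only2);         // original pixels of component 2, everything else zero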

Here's the complete code:

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

int main(int argc, const char * argv[]) {

    // Create an image
    const int color_white = 255;
    Mat src = Mat::zeros(600, 800, CV_8UC1);

    rectangle(src, Point(100, 100), Point(200, 200), color_white, CV_FILLED);
    rectangle(src, Point(500, 150), Point(600, 450), color_white, CV_FILLED);
    rectangle(src, Point(350,250), Point(359,251), color_white, CV_FILLED);
    rectangle(src, Point(354,246), Point(355,255), color_white, CV_FILLED);
    circle(src, Point(300, 400), 75, color_white, CV_FILLED);

    imshow("Original", src);

    // Get connected components and stats
    const int connectivity_8 = 8;
    Mat labels, stats, centroids;

    int nLabels = connectedComponentsWithStats(src, labels, stats, centroids, connectivity_8, CV_32S);

    cout << "Number of connected components = " << nLabels << endl << endl;

    cout << "Show label values:" << endl;
    int component1Pixel = labels.at<int>(150,150);
    cout << "pixel at(150,150) = " << component1Pixel << endl;
    int component2Pixel = labels.at<int>(300,550);
    cout << "pixel at(300,550) = " << component2Pixel << endl << endl;

    // Statistics
    cout << "Show statistics and centroids:" << endl;
    cout << "stats:" << endl << "(left,top,width,height,area)" << endl << stats << endl << endl;
    cout << "centroids:" << endl << "(x, y)" << endl << centroids << endl << endl;

    // Print individual stats for component 1 (component 0 is background)
    cout << "Component 1 stats:" << endl;
    cout << "CC_STAT_LEFT   = " << stats.at<int>(1,CC_STAT_LEFT) << endl;
    cout << "CC_STAT_TOP    = " << stats.at<int>(1,CC_STAT_TOP) << endl;
    cout << "CC_STAT_WIDTH  = " << stats.at<int>(1,CC_STAT_WIDTH) << endl;
    cout << "CC_STAT_HEIGHT = " << stats.at<int>(1,CC_STAT_HEIGHT) << endl;
    cout << "CC_STAT_AREA   = " << stats.at<int>(1,CC_STAT_AREA) << endl;

    // Create image with only component 2
    Mat only2;
    compare(labels, 2, only2, CMP_EQ);

    imshow("Component 2", only2);

    waitKey(0);

}
