Does anybody know what my problem is (motion tracking)? Please help me


Problem description

After 4 weeks I found my problem: I needed to set up my camera driver and update it for my laptop. But when I debug my code it says successful, yet many Camera View windows open, and after that, when I press 't' for tracking, an error occurs. Does anybody know what the problem is?
I debugged it and it worked very well on my friend's laptop, but on my laptop many Camera View windows open!
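
For reference, here is a minimal, stand-alone check (an assumption on my side, not something I have confirmed) that a flaky webcam driver is not handing back failed or empty frames; if it is, the cvtColor() call in the full code below would be the first thing to throw:

// Sketch only (assumption): if the webcam driver misbehaves, VideoCapture::read()
// can return false or hand back an empty Mat, and the next cvtColor() call throws.
// This guard makes that failure mode visible instead of crashing.
#include <opencv2/opencv.hpp>   // umbrella header (OpenCV 2.x and later)
#include <iostream>

int main(){
	cv::VideoCapture capture(0);              // same camera index as the full code
	if(!capture.isOpened()){
		std::cout << "ERROR ACQUIRING VIDEO FEED" << std::endl;
		return -1;
	}
	cv::Mat frame, gray;
	while(true){
		if(!capture.read(frame) || frame.empty()){
			std::cout << "ERROR: empty frame from camera" << std::endl;  // likely a driver hiccup
			break;                            // exit cleanly instead of crashing inside cvtColor()
		}
		cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
		cv::imshow("Camera check", gray);
		if(cv::waitKey(10) == 27) break;      // 'esc' exits
	}
	return 0;
}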

#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <iostream>   // cout, endl
#include <sstream>    // stringstream used by intToString()

using namespace std;
using namespace cv;

//our sensitivity value to be used in the threshold() function
const static int SENSITIVITY_VALUE = 20;
//size of blur used to smooth the image to remove possible noise and
//increase the size of the object we are trying to track. (Much like dilate and erode)
const static int BLUR_SIZE = 10;
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0};
//bounding rectangle of the object, we will use the center of this as its position.
Rect objectBoundingRectangle = Rect(0,0,0,0);


//int to string helper function
string intToString(int number){

	//this function has a number input and string output
	std::stringstream ss;
	ss << number;
	return ss.str();
}

void searchForMovement(Mat thresholdImage, Mat &cameraFeed){
	//notice how we use the '&' operator for the cameraFeed. This is because we wish
	//to take the values passed into the function and manipulate them, rather than just working with a copy.
	//eg. we draw to the cameraFeed in this function which is then displayed in the main() function.
	bool objectDetected=false;
	Mat temp;
	thresholdImage.copyTo(temp);
	//these two vectors needed for output of findContours
	vector< vector<Point> > contours;
	vector<Vec4i> hierarchy;
	//find contours of filtered image using openCV findContours function
	//findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
	findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours

	//if contours vector is not empty, we have found some objects
	if(contours.size()>0)objectDetected=true;
	else objectDetected = false;

	if(objectDetected){
		//the largest contour is found at the end of the contours vector
		//we will simply assume that the biggest contour is the object we are looking for.
		vector< vector<Point> > largestContourVec;
		largestContourVec.push_back(contours.at(contours.size()-1));
		//make a bounding rectangle around the largest contour then find its centroid
		//this will be the object's final estimated position.
		objectBoundingRectangle = boundingRect(largestContourVec.at(0));
		int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
		int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

		//update the objects positions by changing the 'theObject' array values
		theObject[0] = xpos , theObject[1] = ypos;
	}
	//make some temp x and y variables so we dont have to type out so much
	int x = theObject[0];
	int y = theObject[1];
	//draw some crosshairs on the object
	circle(cameraFeed,Point(x,y),20,Scalar(0,255,0),2);
	line(cameraFeed,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
	line(cameraFeed,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
	line(cameraFeed,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
	line(cameraFeed,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);
	putText(cameraFeed,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);



}
int main(){

	//some boolean variables for added functionality
	bool objectDetected = false;
	//these two can be toggled by pressing 'd' or 't'
	bool debugMode = false;
	bool trackingEnabled = false;
	//pause and resume code
	bool pause = false;
	//set up the matrices that we will need
	//the two frames we will be comparing
	Mat frame1,frame2;
	//their grayscale images (needed for absdiff() function)
	Mat grayImage1,grayImage2;
	//resulting difference image
	Mat differenceImage;
	//thresholded difference image (for use in findContours() function)
	Mat thresholdImage;
	//video capture object.
	VideoCapture capture;

	capture.open(0);

		if(!capture.isOpened()){
			cout<<"ERROR ACQUIRING VIDEO FEED\n";
			getchar();
			return -1;
		}
	while(1){

		//we can loop the video by re-opening the capture every time the video reaches its last frame

		

		//check if the video has reached its last frame.
		//we add '-1' because we are reading two frames from the video at a time.
		//if this is not included, we get a memory error!
	
			//read first frame
			capture.read(frame1);
			//convert frame1 to gray scale for frame differencing
			cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
			//copy second frame
			capture.read(frame2);
			//convert frame2 to gray scale for frame differencing
			cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
			//perform frame differencing with the sequential images. This will output an "intensity image"
			//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
			cv::absdiff(grayImage1,grayImage2,differenceImage);
			//threshold intensity image at a given sensitivity value
			cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
			if(debugMode==true){
				//show the difference image and threshold image
				cv::imshow("Difference Image",differenceImage);
				cv::imshow("Threshold Image",thresholdImage);
			}else{
				//if not in debug mode, destroy the windows so we don't see them anymore
				cv::destroyWindow("Difference Image");
				cv::destroyWindow("Threshold Image");
			}
			//use blur() to smooth the image, remove possible noise and
			cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
			//increase the size of the object we are trying to track. (Much like dilate and erode)

			//threshold again to obtain binary image from blur output
			cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
			if(debugMode==true){
				//show the threshold image after it's been "blurred"

				imshow("Final Threshold Image",thresholdImage);



			}
			else {
				//if not in debug mode, destroy the windows so we don't see them anymore
				cv::destroyWindow("Final Threshold Image");
			}

			//if tracking enabled, search for contours in our thresholded image

			if(trackingEnabled){


				searchForMovement(thresholdImage,frame1);

			}
			
			//show our captured frame
			imshow("Frame1",frame1);
			//check to see if a button has been pressed.
			//this 10ms delay is necessary for proper operation of this program
			//if removed, frames will not have enough time to refresh and a blank 
			//image will appear.
			switch(waitKey(10)){

			case 27: //'esc' key has been pressed, exit program.
				return 0;
			case 116: //'t' has been pressed. this will toggle tracking
				trackingEnabled = !trackingEnabled;
				if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
				else cout<<"Tracking enabled."<<endl;
				break;
			case 100: //'d' has been pressed. this will toggle debug mode
				debugMode = !debugMode;
				if(debugMode == false) cout<<"Debug mode disabled."<<endl;
				else cout<<"Debug mode enabled."<<endl;
				break;
			case 112: //'p' has been pressed. this will pause/resume the code.
				pause = !pause;
				if(pause == true){
					cout<<"Code paused, press 'p' again to resume"<<endl;
					while (pause == true){
						//stay in this loop until 'p' is pressed again
						switch (waitKey()){
							//a switch statement inside a switch statement? Mind blown.
						case 112:
							//change pause back to false
							pause = false;
							break;
						}
					}
				}
				else cout<<"Code resumed."<<endl;
				break;
			}
				


			


		
	}

	return 0;

}

Solution

You should go to the OpenCV official site for support. Welcome to opencv documentation![^]

Here is an example from that site that looks like it shows what you want to do.
Cascade Classifier[^]

Quote:

Use the CascadeClassifier class to detect objects in a video stream. Particularly, we will use the functions:
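
For orientation, here is a minimal sketch of that workflow. It is my own illustration rather than code taken from the tutorial, and the cascade .xml file name is an assumption; use whichever trained cascade ships with your OpenCV installation (typically under data/haarcascades):

// Minimal sketch of the CascadeClassifier workflow described in the linked tutorial:
// load a trained cascade, then call detectMultiScale() on each grayscale frame.
// The cascade file path is an assumption -- point it at a cascade from your install.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main(){
	cv::CascadeClassifier detector;
	if(!detector.load("haarcascade_frontalface_default.xml")){   // assumed file name
		std::cout << "Could not load cascade file" << std::endl;
		return -1;
	}
	cv::VideoCapture capture(0);
	if(!capture.isOpened()){
		std::cout << "ERROR ACQUIRING VIDEO FEED" << std::endl;
		return -1;
	}
	cv::Mat frame, gray;
	while(capture.read(frame) && !frame.empty()){
		cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
		cv::equalizeHist(gray, gray);                 // improves detection contrast
		std::vector<cv::Rect> objects;
		detector.detectMultiScale(gray, objects);     // detect on the current frame
		for(size_t i = 0; i < objects.size(); i++)
			cv::rectangle(frame, objects[i], cv::Scalar(0,255,0), 2);
		cv::imshow("Detections", frame);
		if(cv::waitKey(10) == 27) break;              // 'esc' exits, as in your code
	}
	return 0;
}

detectMultiScale() returns one bounding Rect per detected object, which plays the same role as the bounding rectangle your motion-tracking code computes from the largest contour.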

