如何将被跟踪对象的位置(x,y)写入文本文件? [英] How to write the position (x,y) of the object being tracked into text file?

查看:393
本文介绍了如何将被跟踪对象的位置(x,y)写入文本文件?的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

我试图将被跟踪对象的位置(x,y)记录到文本文件中。
我使用opencv和c ++ visual 2010直到现在我可以保存一个数据,但这个数据是初始位置,但重复。我想让它保存每一帧的实际位置。



简而言之,我如何在屏幕上写入由PutText()写的确切数据,但是写入文件?
看到puttext()写入屏幕

  //将对象的位置写入屏幕
putText(framein,Tracking object at(+ intToString(x)+,+ intToString(y)+),Point(x,y),1,1,Scalar(255,0,0) ,2);

我认为问题出现在这个部分:

  //保存位置
ofstream file_;
file_.open(position.txt);
file_<<这些是由前景对象\\\
创建的位置模式;
for(int count = -1; count <10000; count ++)
{
file_<<X:<< intToString(x)< <<Y:<< intToString(y)<<\\\
;


}
file_.close();

完整代码是:

  #include< opencv2 / opencv.hpp> 
#include< opencv2 / core / core.hpp>
#include< opencv2 / highgui / highgui.hpp>
#include< opencv2 / video / background_segm.hpp>
#include< opencv2 / imgproc / imgproc.hpp>
#include< opencv2 / video / video.hpp>
//#include< opencv2 / videoio.hpp>
//#include< opencv2 / imgcodecs.hpp>
// C
#include< stdio.h>
// C ++
#include< iostream>
#include< sstream>
#include< fstream>

using namespace cv;
using namespace std;

//全局变量
cv:Mat fg_mask;
cv :: Mat frame;
cv :: Mat binaryImage;
cv :: Mat ContourImg;
Ptr< BackgroundSubtractor> pMOG; // MOG背景减法器
int keyboard; //从键盘输入
//我们只有一个对象来搜索
//并跟踪它的位置。
int theObject [2] = {0,0};
//对象的边界矩形,我们将使用它的中心作为它的位置。
Rect objectBoundingRectangle = Rect(0,0,0,0);
//我们要使用的敏感度值
const static int SENSITIVITY_VALUE = 50;

string intToString(int number){

//此函数有一个数字输入和字符串输出
std :: stringstream ss;
ss<<数;
return ss.str();
}
void searchForMovement(Mat binaryImage,Mat& framein){
//注意我们如何使用objectDetected和cameraFeed的'&'操作符。这是因为我们希望
//把传递给函数的值和操作它们,而不是只使用一个副本。
// eg。我们绘制到cameraFeed,显示在main()函数中。
bool objectDetected = false;
Mat temp;
binaryImage.copyTo(temp);
//这两个向量需要输出findContours
vector<向量< Point> >轮廓;
vector< Vec4i>层次;
//使用openCV查找过滤图像的轮廓findContours函数
// findContours(temp,contour,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE); //检索所有轮廓
findContours(temp,contour, ,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE); //检索外部轮廓

//如果轮廓矢量不为空,我们发现了一些对象
if(contours.size()> 0)objectDetected =真正;
else objectDetected = false;

if(objectDetected){
//在轮廓向量的末端找到最大轮廓
//我们将简单地假定最大轮廓是我们的对象寻找。
vector<向量< Point> >最大ContourVec;
largestContourVec.push_back(contoururs.at(contours.size() - 1));
//在最大轮廓周围创建一个边界矩形,然后找到它的质心
//这将是对象的最终估计位置。
objectBoundingRectangle = boundingRect(largestContourVec.at(0));
int xpos = objectBoundingRectangle.x + objectBoundingRectangle.width / 2;
int ypos = objectBoundingRectangle.y + objectBoundingRectangle.height / 2;

//通过更改'theObject'数组值来更新对象位置
theObject [0] = xpos,theObject [1] = ypos;
}
//使一些temp x和y变量,所以我们不必输入这么多
int x = theObject [0];
int y = theObject [1];

//在对象周围画一些十字线
circle(framein,Point(x,y),20,Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y + 25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x + 25,y),Scalar(0,255,0),2);

//将对象的位置写入屏幕
putText(framein,Tracking object at(+ intToString(x)+,+ intToString(y)+) ,Point(x,y),1,1,Scalar(255,0,0),2);

//保存位置
ofstream file_;
file_.open(position.txt);
file_<<这些是由前景对象\\\
做出的位置模式;
for(int count = -1; count <10000; count ++)
{
file_<<X:<< intToString(x)< <<Y:<< intToString(y)<<\\\
;


}
file_.close();
//std::cin.get();


}

void morphOps(Mat& thresh){

//创建结构化元素,用于 和侵蚀图像。
//这里选择的元素是一个3px x 3px的矩形

Mat erodeElement = getStructuringElement(MORPH_RECT,Size(2,2)); // 3x3
//用更大的元素展开,确保对象是可见的
Mat dilateElement = getStructuringElement(MORPH_RECT,Size(1,1)); // 8x8

erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);


dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);

}
int main(int,char **)
{
//添加功能的一些布尔变量
bool objectDetected = false;
//这两个可以通过按'd'或't'来切换
bool debugMode = true;
bool trackingEnabled = true;
//暂停和恢复代码
bool pause = false;
//视频捕获对象。
VideoCapture capture;
while(1){

//我们可以通过在每次视频到达最后一帧时重新打开捕获来循环视频。

capture.open("Video_003.avi");
//capture.open(0);

if(!capture.isOpened()){
cout<<ERROR ACQUIRING VIDEO FEED \\\
;
getchar();
return -1;
}
double fps = capture.get(CV_CAP_PROP_FPS); // get the frames per seconds of the video
cout<< 每秒帧数:< fps<< endl;

pMOG = new BackgroundSubtractorMOG();

//形态元素
Mat元素= getStructuringElement(MORPH_RECT,Size(7,7),Point(3,3));

int count = -1;

//检查视频是否到达最后一帧。
//我们添加-1,因为我们一次从视频中读取两个帧。
//如果这不包括,我们得到一个内存错误!
while(capture.get(CV_CAP_PROP_POS_FRAMES)< capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
//从相机获取帧
capture.read(frame);
//更新计数器
++ count;
//调整大小
resize(frame,frame,Size(frame.size()。width / 2,frame.size()。height / 2)
// Blur
blur(frame,frame,Size(5,5));

//背景减法
pMOG-> operator()(frame,fg_mask,0.05);

////////
//前处理
// 1分删除
morphologyEx(fg_mask,binaryImage,CV_MOP_CLOSE,element);

// threshold
//给定灵敏度值的阈值强度图像
cv :: threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
morphOps(binaryImage);

if(debugMode == true){

imshow(frame,frame);
imshow(fg_mask,fg_mask);
imshow(final,binaryImage);
} else {
//如果不是在调试模式,销毁窗口,所以我们不再看到它们
cv :: destroyWindow(frame);
cv :: destroyWindow(fg_mask);
cv :: destroyWindow(final);
}

//如果启用跟踪,在我们的阈值图像中搜索轮廓
if(trackingEnabled){

searchForMovement(binaryImage,frame);

//查找轮廓
ContourImg = binaryImage.clone();
// less blob delete
vector<载体,点> >轮廓;

findContours(ContourImg,
contoururs,//矢量轮廓
CV_RETR_EXTERNAL,//检索外部轮廓
CV_CHAIN_APPROX_NONE); //每个轮廓的所有像素


vector< Rect>输出;
vector<载体,点> > :: iterator itc = contoururs.begin();
while(itc!= contours.end()){

//创建对象的边界rect
//在原点图像上绘制直方图
Rect mr = boundingRect (Mat(* itc));
rectangle(frame,mr,CV_RGB(255,0,0));
++ itc;
}
}
imshow(frame,frame);
//保存前景掩码
string name =mask_+ std :: to_string(static_cast< long long>(count))+.png;
imwrite(D:\\SO\\temp\\+ name,fg_mask);

switch(waitKey(10)){

case 27://'esc'键已被按下,退出程序。
return 0;
case 116://'t'已按下。这将切换跟踪
trackingEnabled =!trackingEnabled;
if(trackingEnabled == false)cout <<<Tracking disabled。<< endl;
else cout<<Tracking enabled。<< endl;
break;
case 100://'d'已经按下。这将调试模式
debugMode =!debugMode;
if(debugMode == true)cout<<Debug mode enabled。<< endl;
else cout<<Debug mode disabled。<< endl;
break;
case 112://'p'已被按下。这将暂停/恢复代码。
pause =!pause;
if(pause == true){cout<<代码暂停,再次按p恢复<< endl;
while(pause == true){
//保持在这个循环中直到
切换(waitKey()){
// switch语句里面的switch语句?心不在焉。
case 112:
//将暂停改回false
pause = false;
cout<<代码已恢复<<< endl;
break;
}

}
//相机将在VideoCapture析构函数中自动取消初始化

}
}
}
//在重新打开之前释放捕获并再次循环。
capture.release();
}
return 0;
}


解决方案

在您的代码。但是要回答你的问题:



在你的代码中,你打开文件,输出x和y的相同值10000次,并关闭 / em>。你应该做的是在开始时打开文件,每帧只输出一对坐标,然后关闭文件。



示例代码:



$ b

在主循环开始之前

ofstream file_;
file_.open(position.txt);
file_<<这些是由前景对象\\\
做出的位置模式;

在主循环中

  file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";

主循环结束后

  file_.close(); 

编辑:按照我的意图添加完整代码:

  #include< opencv2 / opencv.hpp> 
#include< opencv2 / core / core.hpp>
#include< opencv2 / highgui / highgui.hpp>
#include< opencv2 / video / background_segm.hpp>
#include< opencv2 / imgproc / imgproc.hpp>
#include< opencv2 / video / video.hpp>
//#include< opencv2 / videoio.hpp>
//#include< opencv2 / imgcodecs.hpp>
// C
#include< stdio.h>
// C ++
#include< iostream>
#include< sstream>
#include< fstream>

using namespace cv;
using namespace std;

ofstream file_;

//全局变量
cv :: Mat fg_mask;
cv :: Mat frame;
cv :: Mat binaryImage;
cv :: Mat ContourImg;
Ptr< BackgroundSubtractor> pMOG; // MOG背景减法器
int keyboard; //从键盘输入
//我们只有一个对象来搜索
//并跟踪它的位置。
int theObject[2] = {0,0};
//对象的边界矩形,我们将使用它的中心作为它的位置。
Rect objectBoundingRectangle = Rect(0,0,0,0);
//我们要使用的敏感度值
const static int SENSITIVITY_VALUE = 50;

string intToString(int number){

//此函数有一个数字输入和字符串输出
std :: stringstream ss;
ss<<数;
return ss.str();
}
void searchForMovement(Mat binaryImage,Mat& framein){
//注意我们如何使用objectDetected和cameraFeed的'&'操作符。这是因为我们希望
//把传递给函数的值和操作它们,而不是只使用一个副本。
// eg。我们绘制到cameraFeed,显示在main()函数中。
bool objectDetected = false;
Mat temp;
binaryImage.copyTo(temp);
//这两个向量需要输出findContours
vector<向量< Point> >轮廓;
vector< Vec4i>层次;
//使用openCV查找过滤图像的轮廓findContours函数
// findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE); //检索所有轮廓
findContours(temp,contour, ,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE); //检索外部轮廓

//如果轮廓矢量不为空,我们发现了一些对象
if(contours.size()> 0)objectDetected =真正;
else objectDetected = false;

if(objectDetected){
//在轮廓向量的末端找到最大轮廓
//我们将简单地假定最大轮廓是我们的对象寻找。
vector<向量< Point> >最大ContourVec;
largestContourVec.push_back(contoururs.at(contours.size() - 1));
//在最大轮廓周围创建一个边界矩形,然后找到它的质心
//这将是对象的最终估计位置。
objectBoundingRectangle = boundingRect(largestContourVec.at(0));
int xpos = objectBoundingRectangle.x + objectBoundingRectangle.width / 2;
int ypos = objectBoundingRectangle.y + objectBoundingRectangle.height / 2;

//通过更改'theObject'数组值来更新对象位置
theObject [0] = xpos,theObject [1] = ypos;
}
//使一些temp x和y变量,所以我们不必输入这么多
int x = theObject [0];
int y = theObject [1];

//在对象周围画一些十字线
circle(framein,Point(x,y),20,Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y + 25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x + 25,y),Scalar(0,255,0),2);

//将对象的位置写入屏幕
putText(framein,Tracking object at(+ intToString(x)+,+ intToString(y)+) ,Point(x,y),1,1,Scalar(255,0,0),2);

//保存位置
file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";

//std::cin.get();


}

void morphOps(Mat& thresh){

//创建结构化元素,用于 和侵蚀图像。
//这里选择的元素是一个3px x 3px的矩形

Mat erodeElement = getStructuringElement(MORPH_RECT,Size(2,2)); // 3x3
//用更大的元素展开,确保对象是可见的
Mat dilateElement = getStructuringElement(MORPH_RECT,Size(1,1)); // 8x8

erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);


dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);

}
int main(int,char **)
{

file_.open(position.txt);
file_<<这些是由前景对象\\\
做出的位置模式;

//用于添加功能的一些布尔变量
bool objectDetected = false;
//这两个可以通过按'd'或't'来切换
bool debugMode = true;
bool trackingEnabled = true;
//暂停和恢复代码
bool pause = false;
//视频捕获对象。
VideoCapture capture;

while(1){

//我们可以通过在每次视频到达最后一帧时重新打开捕获循环视频。

capture.open(Video_003.avi);
//capture.open(0);

if(!capture.isOpened()){
cout<<ERROR ACQUIRING VIDEO FEED \\\
;
getchar();
return -1;
}
double fps = capture.get(CV_CAP_PROP_FPS); // get the frames per seconds of the video
cout<< 每秒帧数:< fps<< endl;

pMOG = new BackgroundSubtractorMOG();

//形态元素
Mat元素= getStructuringElement(MORPH_RECT,Size(7,7),Point(3,3));

int count = -1;

//检查视频是否到达最后一帧。
//我们添加-1,因为我们一次从视频中读取两个帧。
//如果这不包括,我们得到一个内存错误!
while(capture.get(CV_CAP_PROP_POS_FRAMES)< capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
//从相机获取帧
capture.read(frame);
//更新计数器
++ count;
//调整大小
resize(frame,frame,Size(frame.size()。width / 2,frame.size()。height / 2)
// Blur
blur(frame,frame,Size(5,5));

//背景减法
pMOG-> operator()(frame,fg_mask,0.05);

////////
//前处理
// 1分删除
morphologyEx(fg_mask,binaryImage,CV_MOP_CLOSE,element);

// threshold
//给定灵敏度值的阈值强度图像
cv :: threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
morphOps(binaryImage);

if(debugMode == true){

imshow(frame,frame);
imshow(fg_mask,fg_mask);
imshow(final,binaryImage);
} else {
//如果不是在调试模式,销毁窗口,所以我们不再看到它们
cv :: destroyWindow(frame);
cv :: destroyWindow(fg_mask);
cv :: destroyWindow(final);
}

//如果启用跟踪,在我们的阈值图像中搜索轮廓
if(trackingEnabled){

searchForMovement(binaryImage,frame);

//查找轮廓
ContourImg = binaryImage.clone();
// less blob delete
vector<载体,点> >轮廓;

findContours(ContourImg,
contoururs,//矢量轮廓
CV_RETR_EXTERNAL,//检索外部轮廓
CV_CHAIN_APPROX_NONE); //每个轮廓的所有像素


vector< Rect>输出;
vector<载体,点> > :: iterator itc = contoururs.begin();
while(itc!= contours.end()){

//创建对象的边界rect
//直接绘制原点图像
Rect mr = boundingRect (mat(* itc));
rectangle(frame,mr,CV_RGB(255,0,0));
++ itc;
}
}
imshow(frame,frame);
//保存前景掩码
string name =mask_+ std :: to_string(static_cast< long long>(count))+.png;
imwrite(D:\\SO\\temp\\+ name,fg_mask);

switch(waitKey(10)){

case 27://'esc'键已被按下,退出程序。
return 0;
case 116://'t'已按下。这将切换跟踪
trackingEnabled =!trackingEnabled;
if(trackingEnabled == false)cout<<Tracking disabled。<< endl;
else cout<<Tracking enabled。<< endl;
break;
case 100://'d'已经按下。这将调试模式
debugMode =!debugMode;
if(debugMode == true)cout<<Debug mode enabled。<< endl;
else cout<<Debug mode disabled。<< endl;
break;
case 112://'p'已被按下。这将暂停/恢复代码。
pause =!pause;
if(pause == true){cout<<代码暂停,再次按p恢复<< endl;
while(pause == true){
//保持在这个循环中直到
切换(waitKey()){
// switch语句里面的switch语句?心不在焉。
case 112:
//将暂停改回false
pause = false;
cout<<代码已恢复<<< endl;
break;
}

}
//相机将在VideoCapture析构函数中自动取消初始化

}
}
}
//在重新打开之前释放捕获并再次循环。
capture.release();
//关闭位置日志
file_.close();
}
return 0;
}


I am trying to record the position(x,y) of the object being tracked into text file. I am using opencv and c++ visual 2010. till now i can save a data but this data is the initial position but repeated. i want it to save the actual position at every frame.

In short, how can i write the exact data writen by PutText() on the screen but to a file? see what puttext() write to the screen

 //write the position of the object to the screen
 putText(framein,"Tracking object at (" +   intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

I think the problem is in this portion:

//save position
ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
for( int count = -1; count < 10000; count++ )
{
    file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";


}
file_.close();  

the full code is this:

 #include < opencv2/opencv.hpp>
 #include < opencv2/core/core.hpp>
 #include < opencv2/highgui/highgui.hpp>
 #include < opencv2/video/background_segm.hpp>
 #include < opencv2/imgproc/imgproc.hpp>
 #include < opencv2/video/video.hpp>
//#include < opencv2/videoio.hpp>
//#include < opencv2/imgcodecs.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
#include <fstream>

using namespace cv;
using namespace std;

//global variables shared by main() and searchForMovement()
cv::Mat fg_mask;       // foreground mask produced by the MOG background subtractor
cv::Mat frame;         // current video frame (resized + blurred in main loop)
cv::Mat binaryImage;   // thresholded foreground after morphology
cv::Mat ContourImg;    // scratch copy used for the second findContours pass
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard (NOTE: never read or written anywhere in this file)
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0};
//bounding rectangle of the object, we will use the center of this as its    position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//our sensitivity value to be used 
const static int SENSITIVITY_VALUE = 50;

std::string intToString(int number){
    // Render an int as its decimal string form via an output string stream.
    std::ostringstream out;
    out << number;
    return out.str();
}
 void searchForMovement(Mat binaryImage, Mat &framein){
//framein is taken by reference ('&') because we draw the crosshair and label
//onto the caller's frame (displayed in main()), rather than onto a copy.
bool objectDetected = false;
Mat temp;
binaryImage.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
//findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours

//if contours vector is not empty, we have found some objects
if(contours.size()>0)objectDetected=true;
else objectDetected = false;

 if(objectDetected){
    //the largest contour is found at the end of the contours vector
    //we will simply assume that the biggest contour is the object we are looking for.
    vector< vector<Point> > largestContourVec;
    largestContourVec.push_back(contours.at(contours.size()-1));
    //make a bounding rectangle around the largest contour then find its centroid
    //this will be the object's final estimated position.
    objectBoundingRectangle = boundingRect(largestContourVec.at(0));
    int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
    int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

    //update the objects positions by changing the 'theObject' array values
    theObject[0] = xpos , theObject[1] = ypos;
}
//make some temp x and y variables so we dont have to type out so much
//(note: if nothing was detected this frame, these are the LAST known coords)
int x = theObject[0];
int y = theObject[1];

//draw some crosshairs around the object
circle(framein,Point(x,y),20,Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);

//write the position of the object to the screen
putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

//save position
//BUG (this is the subject of the question): the file is re-opened here on
//EVERY frame, which truncates position.txt, and then the same single (x,y)
//pair is written 10001 times. The fix (see the answer below) is to open the
//file once before the main loop, write one line per frame, and close at exit.
ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
for( int count = -1; count < 10000; count++ )
{
    file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";


}
file_.close();
//std::cin.get();


  }

   void morphOps(Mat &thresh){

    //create structuring elements used to erode and then dilate the image.
   //NOTE: despite the trailing comments below, the erode element is 2x2 and
   //the dilate element is only 1x1 (a 1x1 dilate is effectively a no-op).

  Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2)); //2x2, not 3x3
  //dilate element — comment said "larger", but 1x1 is smaller than the erode element
  Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1)); //1x1, not 8x8

    erode(thresh,thresh,erodeElement);
   erode(thresh,thresh,erodeElement);


   dilate(thresh,thresh,dilateElement);
   dilate(thresh,thresh,dilateElement);

   }
 int main(int, char**)
 {
//some boolean variables for added functionality
bool objectDetected = false; //NOTE: unused in main() (searchForMovement has its own)
//these two can be toggled by pressing 'd' or 't'
bool debugMode = true;
bool trackingEnabled = true;
//pause and resume code
bool pause = false;
//video capture object.
VideoCapture capture;
while(1){

    //we can loop the video by re-opening the capture every time the video reaches its last frame

    capture.open("Video_003.avi");
    //capture.open(0);

    if(!capture.isOpened()){
        cout<<"ERROR ACQUIRING VIDEO FEED\n";   
        getchar();
        return -1;
    }
    double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
     cout << "Frame per seconds : " << fps << endl;

pMOG = new BackgroundSubtractorMOG();

 //morphology element  
 Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );

int count = -1;

 //check if the video has reach its last frame.
    //we add '-1' because we are reading two frames from the video at a time.
    //if this is not included, we get a memory error!
    while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
    // Get frame from camera
        capture.read(frame);
    // Update counter
    ++count;
    //Resize  
 resize(frame, frame, Size(frame.size().width/2, frame.size().height/2) );
    //Blur  
    blur(frame, frame, Size(5,5) );

    // Background subtraction (0.05 = learning rate)
    pMOG->operator()(frame, fg_mask,0.05);

    ////////
    //pre procesing  
    //1 point delete    
     morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);  

     // threshold
     //threshold intensity image at a given sensitivity value
             cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
        morphOps(binaryImage);

     if(debugMode==true){

    imshow("frame", frame);
    imshow("fg_mask", fg_mask);
    imshow("final", binaryImage);
    }else{
            //if not in debug mode, destroy the windows so we don't see them anymore
            cv::destroyWindow("frame");
            cv::destroyWindow("fg_mask");
            cv::destroyWindow("final");
     }

     //if tracking enabled, search for contours in our thresholded image
     //(searchForMovement also rewrites position.txt each frame — see the bug note there)
        if(trackingEnabled){

            searchForMovement(binaryImage,frame);

     //Find contour  
  ContourImg = binaryImage.clone();  
  //less blob delete  
  vector< vector< Point> > contours;  

   findContours(ContourImg,  
        contours, // a vector of contours  
       CV_RETR_EXTERNAL, // retrieve the external contours  
       CV_CHAIN_APPROX_NONE); // all pixels of each contours  


 vector< Rect > output;  
  vector< vector< Point> >::iterator itc= contours.begin();  
  while (itc!=contours.end()) {  

 //Create bounding rect of object  
 //rect draw on origin image  
 Rect mr= boundingRect(Mat(*itc));  
 rectangle(frame, mr, CV_RGB(255,0,0));  
 ++itc;  
  }  
        }
        imshow("frame", frame);
    // Save foreground mask
    string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
    imwrite("D:\\SO\\temp\\" + name, fg_mask);

    switch(waitKey(10)){

        case 27: //'esc' key has been pressed, exit program.
            return 0;
        case 116: //'t' has been pressed. this will toggle tracking
            trackingEnabled = !trackingEnabled;
            if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
            else cout<<"Tracking enabled."<<endl;
            break;
        case 100: //'d' has been pressed. this will debug mode
            debugMode = !debugMode;
            if(debugMode == true) cout<<"Debug mode enabled."<<endl;
            else cout<<"Debug mode disabled."<<endl;
            break;
        case 112: //'p' has been pressed. this will pause/resume the code.
            pause = !pause;
            if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
            while (pause == true){
                //stay in this loop until 'p' is pressed again
                switch (waitKey()){
                    //a switch statement inside a switch statement? Mind blown.
                case 112: 
                    //change pause back to false
                    pause = false;
                    cout<<"Code Resumed"<<endl;
                    break;
                }

}
// the camera will be deinitialized automatically in VideoCapture destructor

            }
    }
    }
    //release the capture before re-opening and looping again.
    capture.release();
    }
     return 0;
    }

解决方案

OK I see several strange things in your code. But to answer your question:

In your code, you are opening file, outputting the same values for x and y 10000 times and closing file for each frame. Instead what you should do is open file in start, output only one pair of coordinates per frame then close file in end.

Example code:

Before main loop starts

ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";

In main loop

file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";

After main loop ends

file_.close();

EDIT: Added full code the way I meant for it to be:

#include < opencv2/opencv.hpp>
#include < opencv2/core/core.hpp>
#include < opencv2/highgui/highgui.hpp>
#include < opencv2/video/background_segm.hpp>
#include < opencv2/imgproc/imgproc.hpp>
#include < opencv2/video/video.hpp>
//#include < opencv2/videoio.hpp>
//#include < opencv2/imgcodecs.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
#include <fstream>

using namespace cv;
using namespace std;

// Position log, opened once in main() and appended to by searchForMovement()
// (one "X:.. Y:.." line per processed frame). Global so both functions share it.
ofstream file_;

//global variables
cv::Mat fg_mask;       // foreground mask from the MOG background subtractor
cv::Mat frame;         // current video frame
cv::Mat binaryImage;   // thresholded foreground after morphology
cv::Mat ContourImg;    // scratch copy for the second findContours pass
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard (NOTE: never used in this file)
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0};
//bounding rectangle of the object, we will use the center of this as its    position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//our sensitivity value to be used
const static int SENSITIVITY_VALUE = 50;

std::string intToString(int number){
    // Decimal string form of an int (C++11 standard-library conversion).
    return std::to_string(number);
}
void searchForMovement(Mat binaryImage, Mat &framein){
    //framein is taken by reference ('&') because we draw the crosshair and
    //label onto the caller's frame (displayed in main()), not onto a copy.
    bool objectDetected = false;
    Mat temp;
    binaryImage.copyTo(temp);
    //these two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    //find contours of filtered image using openCV findContours function
    //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
    findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours

    //if contours vector is not empty, we have found some objects
    if(contours.size()>0)objectDetected=true;
    else objectDetected = false;

    if(objectDetected){
        //the largest contour is found at the end of the contours vector
        //we will simply assume that the biggest contour is the object we are looking for.
        vector< vector<Point> > largestContourVec;
        largestContourVec.push_back(contours.at(contours.size()-1));
        //make a bounding rectangle around the largest contour then find its centroid
        //this will be the object's final estimated position.
        objectBoundingRectangle = boundingRect(largestContourVec.at(0));
        int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
        int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

        //update the objects positions by changing the 'theObject' array values
        theObject[0] = xpos , theObject[1] = ypos;
    }
    //make some temp x and y variables so we dont have to type out so much
    //(if nothing was detected this frame these hold the LAST known position,
    // which is what gets logged below — possibly a stale coordinate)
    int x = theObject[0];
    int y = theObject[1];

    //draw some crosshairs around the object
    circle(framein,Point(x,y),20,Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);

    //write the position of the object to the screen
    putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

    //save position: append exactly one line per frame to the global log
    //opened once in main() — this is the fix for the question's bug.
    file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";

    //std::cin.get();


}

void morphOps(Mat &thresh){

    //create structuring elements used to erode and then dilate the image.
    //NOTE: despite the trailing comments below, the erode element is 2x2 and
    //the dilate element is only 1x1 (a 1x1 dilate is effectively a no-op).

    Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2)); //2x2, not 3x3
    //dilate element — comment said "larger", but 1x1 is smaller than the erode element
    Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1)); //1x1, not 8x8

    erode(thresh,thresh,erodeElement);
    erode(thresh,thresh,erodeElement);


    dilate(thresh,thresh,dilateElement);
    dilate(thresh,thresh,dilateElement);

}
int main(int, char**)
{
    // Open the position log ONCE for the whole run. searchForMovement()
    // appends one "X:.. Y:.." line per processed frame to this global stream.
    file_.open("position.txt");
    file_ <<"these are the position pattern made by the foreground object \n";

    //these two can be toggled by pressing 'd' or 't'
    bool debugMode = true;
    bool trackingEnabled = true;
    //pause and resume code
    bool pause = false;
    //video capture object.
    VideoCapture capture;

    while(1){

        //we can loop the video by re-opening the capture every time the video reaches its last frame

        capture.open("Video_003.avi");
        //capture.open(0);

        if(!capture.isOpened()){
            cout<<"ERROR ACQUIRING VIDEO FEED\n";
            getchar();
            return -1;
        }
        double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
        cout << "Frame per seconds : " << fps << endl;

        pMOG = new BackgroundSubtractorMOG();

        //morphology element
        Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );

        int count = -1;

        //check if the video has reach its last frame.
        //we add '-1' because we are reading two frames from the video at a time.
        //if this is not included, we get a memory error!
        while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
            // Get frame from camera
            capture.read(frame);
            // Update counter
            ++count;
            //Resize
            resize(frame, frame, Size(frame.size().width/2, frame.size().height/2) );
            //Blur
            blur(frame, frame, Size(5,5) );

            // Background subtraction (0.05 = learning rate)
            pMOG->operator()(frame, fg_mask,0.05);

            ////////
            //pre procesing
            //1 point delete
            morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);

            // threshold
            //threshold intensity image at a given sensitivity value
            cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
            morphOps(binaryImage);

            if(debugMode==true){

                imshow("frame", frame);
                imshow("fg_mask", fg_mask);
                imshow("final", binaryImage);
            }else{
                //if not in debug mode, destroy the windows so we don't see them anymore
                cv::destroyWindow("frame");
                cv::destroyWindow("fg_mask");
                cv::destroyWindow("final");
            }

            //if tracking enabled, search for contours in our thresholded image
            //(this also appends the tracked position to the log, once per frame)
            if(trackingEnabled){

                searchForMovement(binaryImage,frame);

                //Find contour
                ContourImg = binaryImage.clone();
                //less blob delete
                vector< vector< Point> > contours;

                findContours(ContourImg,
                             contours, // a vector of contours
                             CV_RETR_EXTERNAL, // retrieve the external contours
                             CV_CHAIN_APPROX_NONE); // all pixels of each contours


                vector< Rect > output;
                vector< vector< Point> >::iterator itc= contours.begin();
                while (itc!=contours.end()) {

                    //Create bounding rect of object
                    //rect draw on origin image
                    Rect mr= boundingRect(Mat(*itc));
                    rectangle(frame, mr, CV_RGB(255,0,0));
                    ++itc;
                }
            }
            imshow("frame", frame);
            // Save foreground mask
            string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
            imwrite("D:\\SO\\temp\\" + name, fg_mask);

            switch(waitKey(10)){

                case 27: //'esc' key has been pressed, exit program.
                    file_.close(); // flush and close the position log before exiting
                    return 0;
                case 116: //'t' has been pressed. this will toggle tracking
                    trackingEnabled = !trackingEnabled;
                    if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
                    else cout<<"Tracking enabled."<<endl;
                    break;
                case 100: //'d' has been pressed. this will debug mode
                    debugMode = !debugMode;
                    if(debugMode == true) cout<<"Debug mode enabled."<<endl;
                    else cout<<"Debug mode disabled."<<endl;
                    break;
                case 112: //'p' has been pressed. this will pause/resume the code.
                    pause = !pause;
                    if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
                        while (pause == true){
                            //stay in this loop until 'p' is pressed again
                            switch (waitKey()){
                                //a switch statement inside a switch statement? Mind blown.
                                case 112:
                                    //change pause back to false
                                    pause = false;
                                    cout<<"Code Resumed"<<endl;
                                    break;
                            }

                        }
                        // the camera will be deinitialized automatically in VideoCapture destructor

                    }
            }
        }
        //release the capture before re-opening and looping again.
        capture.release();
        // BUG FIX: file_.close() used to be here, INSIDE the replay loop.
        // After the first pass of the video the stream was closed (fail state),
        // so every subsequent write in searchForMovement() was silently lost.
        // The log is now closed only on real exit (esc above); if the process
        // ends any other way, the ofstream destructor flushes and closes it.
    }
    return 0;
}

这篇关于如何将被跟踪对象的位置(x,y)写入文本文件?的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆