Estimation of Euler angles (camera pose) using images from the camera and the OpenCV library


Problem description


I'm working on an Android application and I need to estimate the camera rotation in 3D online, using images from the camera and the OpenCV library. I would like to calculate the Euler angles.

I have read this (http://stackoverflow.com/questions/11642988/determine-camera-rotation-between-two-360x180-equirectangular-panoramic-images) and this (http://stackoverflow.com/questions/17027277/android-opencv-homography-to-camera-pose-considering-camera-intrinsics-and-ba?rq=1) page, and I can estimate the homography matrix as shown here (http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html#cameracalibrationopencv).

My first question is: do I really need to know the camera intrinsic matrix from camera calibration, or is the homography matrix (camera extrinsics) enough to estimate the Euler angles (pitch, roll, yaw)?

If the homography matrix is enough, how exactly can I do it?

Sorry, I am a real beginner with OpenCV and cannot decompose the homography Mat into a rotation matrix and a translation matrix as described here. How can I compute the Euler angles on Android?

Below you can see my code, which uses solvePnPRansac() and decomposeProjectionMatrix() to calculate the Euler angles.

But it just returns a zero vector, double[] eulerArray = {0, 0, 0}! Can somebody help me? What is wrong there? Thank you very much for any response!

public double[] findEulerAngles(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){

    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();


    List<DMatch> matchesList = matches.toList();
    List<KeyPoint> referenceKeypointsList = keypoints2.toList();
    List<KeyPoint> sceneKeypointsList = keypoints1.toList();
    // Calculate the max and min distances between keypoints.
    double maxDist = 0.0;
    double minDist = Double.MAX_VALUE;
    for(DMatch match : matchesList) {
        double dist = match.distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }

    // Identify "good" keypoints based on match distance.
    List<Point3> goodReferencePointsList = new ArrayList<Point3>();
    ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
    double maxGoodMatchDist = 1.75 * minDist;
    for(DMatch match : matchesList) {
        if (match.distance < maxGoodMatchDist) {
            Point kk2 = k2[match.queryIdx].pt;
            Point kk1 = k1[match.trainIdx].pt;

            Point3 point3 = new Point3(kk1.x, kk1.y, 0.0);
            goodReferencePointsList.add(point3);
        goodScenePointsList.add(kk2);
        }
    }


    if (goodReferencePointsList.size() < 4 || goodScenePointsList.size() < 4) {
        // There are too few good points to find the pose.
        return null;
    }

    MatOfPoint3f goodReferencePoints = new MatOfPoint3f();
    goodReferencePoints.fromList(goodReferencePointsList);
    MatOfPoint2f goodScenePoints = new MatOfPoint2f();
    goodScenePoints.fromList(goodScenePointsList);

    MatOfDouble mRMat = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble mTVec = new MatOfDouble(3, 1, CvType.CV_32F);

    //TODO: solve camera intrinsic matrix
    Mat intrinsics = Mat.eye(3, 3, CvType.CV_32F); // dummy camera matrix
    intrinsics.put(0, 0, 400);
    intrinsics.put(1, 1, 400);
    intrinsics.put(0, 2, 640 / 2);
    intrinsics.put(1, 2, 480 / 2);
    Calib3d.solvePnPRansac(goodReferencePoints, goodScenePoints, intrinsics, new MatOfDouble(), mRMat, mTVec);
    MatOfDouble rotCameraMatrix1 = new MatOfDouble(3, 1, CvType.CV_32F);
    double[] rVecArray = mRMat.toArray();
    // Calib3d.Rodrigues(mRMat, rotCameraMatrix1);
    double[] tVecArray = mTVec.toArray();

    MatOfDouble projMatrix = new MatOfDouble(3, 4, CvType.CV_32F); //projMatrix 3x4 input projection matrix P.
    projMatrix.put(0, 0, rVecArray[0]);
    projMatrix.put(0, 1, rVecArray[1]);
    projMatrix.put(0, 2, rVecArray[2]);
    projMatrix.put(0, 3, 0);
    projMatrix.put(1, 0, rVecArray[3]);
    projMatrix.put(1, 1, rVecArray[4]);
    projMatrix.put(1, 2, rVecArray[5]);
    projMatrix.put(1, 3, 0);
    projMatrix.put(2, 0, rVecArray[6]);
    projMatrix.put(2, 1, rVecArray[7]);
    projMatrix.put(2, 2, rVecArray[8]);
    projMatrix.put(2, 3, 0);

    MatOfDouble cameraMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //cameraMatrix Output 3x3 camera matrix K.
    MatOfDouble rotMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrix Output 3x3 external rotation matrix R.
    MatOfDouble transVect = new MatOfDouble(4, 1, CvType.CV_32F); //transVect Output 4x1 translation vector T.
    MatOfDouble rotMatrixX = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixX Optional output rotation matrix around x-axis.
    MatOfDouble rotMatrixY = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixY Optional output rotation matrix around y-axis.
    MatOfDouble rotMatrixZ = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixZ Optional output rotation matrix around z-axis.
    MatOfDouble eulerAngles = new MatOfDouble(3, 1, CvType.CV_32F); //eulerAngles Optional three-element vector containing three Euler angles of rotation in degrees.

    Calib3d.decomposeProjectionMatrix( projMatrix,
            cameraMatrix,
            rotMatrix,
            transVect,
            rotMatrixX,
            rotMatrixY,
            rotMatrixZ,
            eulerAngles);

    double[] eulerArray = eulerAngles.toArray();

    return eulerArray;
}
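
A side note on the zero vector, as a hedged guess rather than a fix confirmed in this thread: solvePnPRansac() returns its rotation as a 3x1 Rodrigues vector in mRMat, not as a 3x3 matrix, so copying mRMat.toArray() element by element into the 3x3 block of projMatrix cannot produce a valid rotation; the commented-out Rodrigues call in the code already points in this direction. A minimal sketch of the missing conversion step, reusing the variable names from the code above:

    // Convert the 3x1 Rodrigues vector from solvePnPRansac() into a
    // 3x3 rotation matrix before copying it into the projection matrix.
    Mat rotation = new Mat();
    Calib3d.Rodrigues(mRMat, rotation);            // rotation is now 3x3
    double[] rArray = new double[9];
    rotation.get(0, 0, rArray);                    // row-major copy of R
    for (int row = 0; row < 3; row++) {
        for (int col = 0; col < 3; col++) {
            projMatrix.put(row, col, rArray[row * 3 + col]);
        }
        projMatrix.put(row, 3, tVecArray[row]);    // translation in the last column
    }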

Solution

The following code now works for me, and I have decomposed the Euler angles from the homography matrix! I get some values for pitch, roll and yaw, but I don't know whether they are correct. Does anybody have an idea how I can test them?

private static MatOfDMatch filterMatchesByHomography(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){

    List<Point> lp1 = new ArrayList<Point>(500);
    List<Point> lp2 = new ArrayList<Point>(500);

    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();

    List<DMatch> matchesList = matches.toList();

    if (matchesList.size() < 4){
        MatOfDMatch mat = new MatOfDMatch();
        return mat;
    }

    // Add matches keypoints to new list to apply homography
    for(DMatch match : matchesList){
        Point kk1 = k1[match.queryIdx].pt;
        Point kk2 = k2[match.trainIdx].pt;
        lp1.add(kk1);
        lp2.add(kk2);
    }

    MatOfPoint2f srcPoints = new MatOfPoint2f(lp1.toArray(new Point[0]));
    MatOfPoint2f dstPoints  = new MatOfPoint2f(lp2.toArray(new Point[0]));

    //---------------------------------------

    Mat mask = new Mat();
    Mat homography = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 0.2, mask); // Finds a perspective transformation between two planes (alternative method: Calib3d.LMEDS)

    Mat pose = cameraPoseFromHomography(homography);

    // Decompose the rotation matrix into Euler angles (see the convention note after this method)
    pitch = Math.atan2(pose.get(2, 1)[0], pose.get(2, 2)[0]); // arctan2(r32, r33)
    roll = Math.atan2(-1*pose.get(2, 0)[0], Math.sqrt( Math.pow(pose.get(2, 1)[0], 2) + Math.pow(pose.get(2, 2)[0], 2)) ); // arctan2(-r31, sqrt(r32^2 + r33^2))
    yaw = Math.atan2(pose.get(2, 0)[0], pose.get(0, 0)[0]);

    List<DMatch> matches_homo = new ArrayList<DMatch>();
    int size = (int) mask.size().height;
    for(int i = 0; i < size; i++){          
        if ( mask.get(i, 0)[0] == 1){
            DMatch d = matchesList.get(i);
            matches_homo.add(d);
        }
    }

    MatOfDMatch mat = new MatOfDMatch();
    mat.fromList(matches_homo);
    return mat;
}
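
Note that Euler-angle naming conventions differ. One common ZYX (yaw-pitch-roll) convention, assuming R = Rz(yaw)*Ry(pitch)*Rx(roll) and 1-based indices r11..r33, reads the angles as sketched below, reusing the pose matrix from the method above; it takes yaw from r21 and r11 rather than from r31 and r11 as in the code above:

    // ZYX extraction from the 3x3 rotation block of the 3x4 pose matrix
    // (a sketch under the R = Rz(yaw)*Ry(pitch)*Rx(roll) convention).
    double r11 = pose.get(0, 0)[0];
    double r21 = pose.get(1, 0)[0];
    double r31 = pose.get(2, 0)[0];
    double r32 = pose.get(2, 1)[0];
    double r33 = pose.get(2, 2)[0];
    double rollX  = Math.atan2(r32, r33);                               // about x
    double pitchY = Math.atan2(-r31, Math.sqrt(r32 * r32 + r33 * r33)); // about y
    double yawZ   = Math.atan2(r21, r11);                               // about z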

This is my camera pose from the homography matrix (see this page too: http://stackoverflow.com/questions/17027277/android-opencv-homography-to-camera-pose-considering-camera-intrinsics-and-ba?rq=1):

private static Mat cameraPoseFromHomography(Mat h) {
    //Log.d("DEBUG", "cameraPoseFromHomography: homography " + matToString(h));

    Mat pose = Mat.eye(3, 4, CvType.CV_32FC1);  // 3x4 matrix, the camera pose
    float norm1 = (float) Core.norm(h.col(0));
    float norm2 = (float) Core.norm(h.col(1));
    float tnorm = (norm1 + norm2) / 2.0f;       // Normalization value

    Mat normalizedTemp = new Mat();
    Core.normalize(h.col(0), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
    normalizedTemp.copyTo(pose.col(0)); // Normalize the rotation, and copies the column to pose

    Core.normalize(h.col(1), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);    
    normalizedTemp.copyTo(pose.col(1));// Normalize the rotation and copies the column to pose

    Mat p3 = pose.col(0).cross(pose.col(1)); // Computes the cross-product of p1 and p2
    p3.copyTo(pose.col(2));// Third column is the crossproduct of columns one and two

    double[] buffer = new double[3];
    h.col(2).get(0, 0, buffer);
    pose.put(0, 3, buffer[0] / tnorm);  //vector t [R|t] is the last column of pose
    pose.put(1, 3, buffer[1] / tnorm);
    pose.put(2, 3, buffer[2] / tnorm);

    return pose;
}
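
As a partial answer to "how can I test it?": a proper rotation matrix is orthonormal, so R * R^T should be close to the identity and det(R) close to 1. A minimal sketch of such a check (the helper name isRoughlyRotation is made up here, not from the thread):

    // Sanity-check the 3x3 rotation block of the pose returned above:
    // R * R^T should be near the identity and det(R) near 1.
    private static boolean isRoughlyRotation(Mat pose) {
        Mat r = pose.submat(0, 3, 0, 3).clone();        // top-left 3x3 block
        Mat rrt = new Mat();
        Core.gemm(r, r.t(), 1.0, new Mat(), 0.0, rrt);  // rrt = R * R^T
        Mat identity = Mat.eye(3, 3, rrt.type());
        double err = Core.norm(rrt, identity, Core.NORM_INF);
        double det = Core.determinant(r);
        return err < 1e-3 && Math.abs(det - 1.0) < 1e-3;
    }

Because cameraPoseFromHomography() normalizes the first two homography columns independently, those pose columns are not guaranteed to be orthogonal, so some deviation is expected; a check like this makes that error visible.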
