Distance measurement with a binocular (stereo) camera based on OpenCV (small error)


Preface

First calibrate the stereo camera to obtain its parameters, then measure distance. The stereo distance measurement in this post is based on the BM (block matching) algorithm. Note: the quality of the stereo calibration directly affects the accuracy of the measurement, so take care during calibration and keep the error as small as possible.

1. Stereo distance measurement from input images

Result 1: (screenshot)

Result 2: (screenshot)
In my tests the error was about 1 cm.

The parameters BlockSize, UniquenessRatio and NumDisparities should be tuned to your own setup.
I chose C++ for its higher runtime efficiency, and the BM algorithm can be modified and customized quite flexibly; I also tried a Python version of BM-based stereo ranging, but the results were not as good as with C++.
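If you would rather hard-code the matcher than drag trackbars, here is a minimal sketch of a fixed StereoBM configuration. The concrete numbers (80 disparities, block size 9, uniqueness ratio 10) are assumed starting points for a 640x360 stereo pair, not values taken from this post; the trackbar-driven code below shows how the sliders map onto the same setters.

// Hedged sketch: configure StereoBM once with fixed parameters instead of trackbars.
// All numeric values here are assumed starting points and must be tuned for your rig.
#include <opencv2/opencv.hpp>
using namespace cv;

Ptr<StereoBM> makeMatcher(const Rect& roiL, const Rect& roiR)
{
    Ptr<StereoBM> matcher = StereoBM::create(16 * 5, 9); // numDisparities (multiple of 16), odd blockSize in 5..21
    matcher->setROI1(roiL);                // valid regions returned by stereoRectify
    matcher->setROI2(roiR);
    matcher->setPreFilterCap(31);
    matcher->setTextureThreshold(10);
    matcher->setUniquenessRatio(10);       // higher -> fewer false matches, sparser disparity map
    matcher->setSpeckleWindowSize(100);
    matcher->setSpeckleRange(32);
    return matcher;
}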
Source code:

/*        Stereo distance measurement        */
#include <opencv2/opencv.hpp>  
#include <iostream>  
#include <math.h> 
using namespace std;
using namespace cv;

const int imageWidth = 640;                             // camera resolution
const int imageHeight = 360;
Vec3f  point3;   
float d;
Size imageSize = Size(imageWidth, imageHeight);

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;

Rect validROIL;// after rectification the image is cropped; validROI is the cropped (valid) region
Rect validROIR;

Mat mapLx, mapLy, mapRx, mapRy;     // rectification lookup tables (remap maps)
Mat Rl, Rr, Pl, Pr, Q;              // rectification rotations R, projection matrices P, reprojection matrix Q
Mat xyz;              // 3-D coordinates

Point origin;         // point where the mouse button was pressed
Rect selection;      // rectangular selection box
bool selectObject = false;    // whether an object is being selected

int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);

/* intrinsic matrix of the left camera, from prior calibration:
fx 0 cx
0 fy cy
0  0  1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 418.523322187048, -1.26842201390676, 343.908870120890,
    0, 421.222568242056, 235.466208987968,
    0, 0, 1);
// distortion coefficients from calibration

/*418.523322187048    0    0
-1.26842201390676    421.222568242056    0
344.758267538961    243.318992284899    1 */ //2

Mat distCoeffL = (Mat_<double>(5, 1) << 0.006636837611004, 0.050240447649195, 0.006681263320267, 0.003130367429418, 0);
//[0.006636837611004,0.050240447649195] [0.006681263320267,0.003130367429418]

/* intrinsic matrix of the right camera, from prior calibration:
fx 0 cx
0 fy cy
0  0  1
*/
Mat cameraMatrixR = (Mat_<double>(3, 3) << 417.417985082506, 0.498638151824367, 309.903372309072,
    0, 419.795432389420, 230.6,
    0, 0, 1);

/*
417.417985082506    0    0
0.498638151824367    419.795432389420    0
309.903372309072    236.256106972796    1
*/ //2

Mat distCoeffR = (Mat_<double>(5, 1) << -0.038407383078874, 0.236392800301615, 0.004121779274885, 0.002296129959664, 0);
//[-0.038407383078874,0.236392800301615]  [0.004121779274885,0.002296129959664]

Mat T = (Mat_<double>(3, 1) << -1.210187345641146e+02, 0.519235426836325, -0.425535566316217);// T: translation vector
//[-1.210187345641146e+02,0.519235426836325,-0.425535566316217]
                                                             // corresponds to the T parameter from Matlab
//Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);// rec as a rotation vector, corresponding to the Matlab om parameter
Mat rec = (Mat_<double>(3, 3) << 0.999341122700880, -0.00206388651740061, 0.0362361815232777,
    0.000660748031451783, 0.999250989651683, 0.0386913826603732,
    -0.0362888948713456, -0.0386419468010579, 0.998593969567432);                // rec: the rotation as a 3x3 matrix (Matlab reports it as the om vector)

/* 0.999341122700880    0.000660748031451783    -0.0362888948713456
-0.00206388651740061    0.999250989651683    -0.0386419468010579
0.0362361815232777    0.0386913826603732    0.998593969567432 */

//Mat T = (Mat_<double>(3, 1) << -48.4, 0.241, -0.0344);// T translation vector (alternative calibration)
//Mat rec = (Mat_<double>(3, 1) << -0.039, -0.04658, 0.00106);// rec rotation vector, Matlab om parameter (alternative calibration)

Mat R;// R: rotation matrix

      /***** stereo matching *****/
void stereo_match(int, void*)
{
    bm->setBlockSize(2 * blockSize + 5);     // SAD window size; 5 to 21 is a good range
    bm->setROI1(validROIL);
    bm->setROI2(validROIR);
    bm->setPreFilterCap(31);
    bm->setMinDisparity(0);  // minimum disparity, default 0, may be negative (int)
    bm->setNumDisparities(numDisparities * 16 + 16);// disparity range (max minus min disparity); must be a multiple of 16 (int)
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(uniquenessRatio);// uniquenessRatio mainly helps reject false matches
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setDisp12MaxDiff(-1);
    Mat disp, disp8;
    bm->compute(rectifyImageL, rectifyImageR, disp);// input images must be grayscale
    disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.));// the computed disparity is CV_16S (fixed point, scaled by 16)
    reprojectImageTo3D(disp, xyz, Q, true); // to get real distances, the X/W, Y/W, Z/W produced by reprojectImageTo3D must be multiplied by 16 (i.e. W divided by 16)
    xyz = xyz * 16;
    imshow("disparity", disp8);
}

/***** mouse callback *****/
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }

    switch (event)
    {
    case EVENT_LBUTTONDOWN:   // left mouse button pressed
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        //cout << origin << "in world coordinate is: " << xyz.at<Vec3f>(origin) << endl;
        point3 = xyz.at<Vec3f>(origin);
        //cout << "point3[0]:" << point3[0] << "point3[1]:" << point3[1] << "point3[2]:" << point3[2]<<endl;
        cout << "World coordinates:" << endl;
        cout << "x: " << point3[0] << "  y: " << point3[1] << "  z: " << point3[2] << endl;
        d = point3[0] * point3[0] + point3[1] * point3[1] + point3[2] * point3[2];
        d = sqrt(d);   // distance in mm
        // cout << "Distance: " << d << " mm" << endl;

        d = d / 10.0;   // mm -> cm
        cout << "Distance: " << d << " cm" << endl;

        // d = d / 1000.0;   // m
        // cout << "Distance: " << d << " m" << endl;
    
        break;
    case EVENT_LBUTTONUP:    // left mouse button released
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}


/***** main *****/
int main()
{
    /*
    stereo rectification
    */
    Rodrigues(rec, R); // Rodrigues transform (rotation matrix <-> vector)
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
        0, imageSize, &validROIL, &validROIR);
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);

    /*
    read the images
    */
    rgbImageL = imread("image_left_1.jpg", CV_LOAD_IMAGE_COLOR);
    cvtColor(rgbImageL, grayImageL, CV_BGR2GRAY);
    rgbImageR = imread("image_right_1.jpg", CV_LOAD_IMAGE_COLOR);
    cvtColor(rgbImageR, grayImageR, CV_BGR2GRAY);

    imshow("ImageL Before Rectify", grayImageL);
    imshow("ImageR Before Rectify", grayImageR);

    /*
    after remap, the left and right images are coplanar and row-aligned
    */
    remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
    remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);

    /*
    display the rectification result
    */
    Mat rgbRectifyImageL, rgbRectifyImageR;
    cvtColor(rectifyImageL, rgbRectifyImageL, CV_GRAY2BGR);  // back to 3 channels so colored lines can be drawn
    cvtColor(rectifyImageR, rgbRectifyImageR, CV_GRAY2BGR);

    // show each image separately
    //rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
    //rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
    imshow("ImageL After Rectify", rgbRectifyImageL);
    imshow("ImageR After Rectify", rgbRectifyImageR);

    // show both on one canvas
    Mat canvas;
    double sf;
    int w, h;
    sf = 600. / MAX(imageSize.width, imageSize.height);
    w = cvRound(imageSize.width * sf);
    h = cvRound(imageSize.height * sf);
    canvas.create(h, w * 2, CV_8UC3);   // note the number of channels

                                        // draw the left image onto the canvas
    Mat canvasPart = canvas(Rect(w * 0, 0, w, h));                                // take part of the canvas
    resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);     // resize the image to the size of canvasPart
    Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),                // the valid ROI scaled to canvas coordinates
        cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
    //rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);                      // draw a rectangle
    cout << "Painted ImageL" << endl;

    // draw the right image onto the canvas
    canvasPart = canvas(Rect(w, 0, w, h));                                      // the other part of the canvas
    resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
    Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
        cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
    //rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
    cout << "Painted ImageR" << endl;

    // draw horizontal lines to check the row alignment
    for (int i = 0; i < canvas.rows; i += 16)
        line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
    imshow("rectified", canvas);

    /*
    stereo matching
    */
    namedWindow("disparity", CV_WINDOW_AUTOSIZE);
    // trackbar for the SAD block size
    createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match);
    // trackbar for the uniqueness ratio
    createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
    // trackbar for the number of disparities
    createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
    // mouse callback: setMouseCallback(window name, callback, user data; usually 0)
    setMouseCallback("disparity", onMouse, 0);
    stereo_match(0, 0);

    waitKey(0);
    return 0;
}

Workflow:

First capture a pair of images from the left and right cameras, then point the program above at those images to measure distance (a minimal capture sketch follows below).
The stereo camera parameters in the code are specific to my setup; calibrate and rectify your own camera and fill in your values.
For stereo calibration, see my post: https://guo-pu.blog.csdn.net/article/details/86602452
For converting the stereo calibration data, see my post: https://guo-pu.blog.csdn.net/article/details/86710737
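For the capture step, here is a minimal sketch of how a pair like image_left_1.jpg / image_right_1.jpg can be produced. It assumes a side-by-side USB stereo camera delivering 2560x720 frames on device index 1, as in the live-capture program in section 2; the device index, frame size and crop rectangles are assumptions to adapt to your own hardware.

// Hedged sketch: grab one side-by-side frame, split it into left/right halves and
// save them under the file names the program above reads. Device index and frame
// geometry are assumptions matching the live-capture program in section 2.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    VideoCapture cap(1);                          // external stereo camera, assumed index 1
    cap.set(CAP_PROP_FRAME_WIDTH, 2560);          // left + right packed side by side
    cap.set(CAP_PROP_FRAME_HEIGHT, 720);
    if (!cap.isOpened()) return -1;

    Mat frame, half;
    cap >> frame;
    if (frame.empty()) return -1;
    resize(frame, half, Size(1280, 360));         // 640x360 per eye, as expected by the listing
    imwrite("image_left_1.jpg",  half(Rect(0,   0, 640, 360)));
    imwrite("image_right_1.jpg", half(Rect(640, 0, 640, 360)));
    return 0;
}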

Camera parameters in detail:

1) Mat cameraMatrixL: the intrinsic matrix of the left camera.
2) Mat distCoeffL = (Mat_<double>(5, 1) ... : the distortion coefficients of the left camera, i.e. K1, K2, P1, P2, K3.
3) Mat cameraMatrixR: the intrinsic matrix of the right camera.
4) Mat distCoeffR = (Mat_<double>(5, 1) ... : the distortion coefficients of the right camera, i.e. K1, K2, P1, P2, K3.
5) Mat T = (Mat_<double>(3, 1) << -1.210187345641146e+02, 0.519235426836325, -0.425535566316217);  // the translation vector between the two cameras.
6) Mat rec = (Mat_<double>(3, 3) << 0.99934112270088...  // the rotation between the two cameras.
Six parameters in total: 1) and 2) belong to the left camera, 3) and 4) to the right camera, and 5) and 6) describe the relative pose of the camera pair.
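As a sanity check on these numbers: after rectification, depth follows Z = fx * baseline / disparity. With the calibration above, fx is roughly 418.5 px and the baseline |T| is roughly 121 mm, so a disparity of 50 px corresponds to about 1 m. A small back-of-the-envelope sketch (not part of the original program):

// Hedged sketch: depth from disparity using the calibration values in the listing.
// reprojectImageTo3D already does this via Q; this is only a plausibility check.
#include <cstdio>

int main()
{
    const double fx = 418.523322187048;    // left-camera focal length in pixels
    const double baselineMm = 121.0;       // |T|, roughly -T[0] in mm
    const double disparityPx = 50.0;       // example disparity in pixels
    std::printf("depth ~ %.0f mm\n", fx * baselineMm / disparityPx);   // ~1013 mm
    return 0;
}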

2. Real-time stereo distance measurement from live camera frames

Result: (screenshot)

Source code:

/******************************/
/*   Stereo matching and distance measurement   */
/******************************/
#include <opencv2/opencv.hpp>  
#include <iostream>  
#include <math.h> 

using namespace std;
using namespace cv;

const int imageWidth = 640;                             // camera resolution
const int imageHeight = 360;
Vec3f  point3;
float d;
Size imageSize = Size(imageWidth, imageHeight);

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;

Rect validROIL;// after rectification the image is cropped; validROI is the cropped (valid) region
Rect validROIR;

Mat mapLx, mapLy, mapRx, mapRy;     // rectification lookup tables (remap maps)
Mat Rl, Rr, Pl, Pr, Q;              // rectification rotations R, projection matrices P, reprojection matrix Q
Mat xyz;              // 3-D coordinates

Point origin;         // point where the mouse button was pressed
Rect selection;      // rectangular selection box
bool selectObject = false;    // whether an object is being selected

int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);

/* intrinsic matrix of the left camera, from prior calibration:
fx 0 cx
0 fy cy
0  0  1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 418.523322187048, -1.26842201390676, 343.908870120890,
    0, 421.222568242056, 235.466208987968,
    0, 0, 1);
// distortion coefficients from calibration

/*418.523322187048    0    0
-1.26842201390676    421.222568242056    0
344.758267538961    243.318992284899    1 */ //2

Mat distCoeffL = (Mat_<double>(5, 1) << 0.006636837611004, 0.050240447649195, 0.006681263320267, 0.003130367429418, 0);
//[0.006636837611004,0.050240447649195] [0.006681263320267,0.003130367429418]

/* intrinsic matrix of the right camera, from prior calibration:
fx 0 cx
0 fy cy
0  0  1
*/
Mat cameraMatrixR = (Mat_<double>(3, 3) << 417.417985082506, 0.498638151824367, 309.903372309072,
    0, 419.795432389420, 230.6,
    0, 0, 1);

/*
417.417985082506    0    0
0.498638151824367    419.795432389420    0
309.903372309072    236.256106972796    1
*/ //2

Mat distCoeffR = (Mat_<double>(5, 1) << -0.038407383078874, 0.236392800301615, 0.004121779274885, 0.002296129959664, 0);
//[-0.038407383078874,0.236392800301615]  [0.004121779274885,0.002296129959664]

Mat T = (Mat_<double>(3, 1) << -1.210187345641146e+02, 0.519235426836325, -0.425535566316217);// T: translation vector
//[-1.210187345641146e+02,0.519235426836325,-0.425535566316217]
                                                             // corresponds to the T parameter from Matlab
//Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);// rec as a rotation vector, corresponding to the Matlab om parameter
Mat rec = (Mat_<double>(3, 3) << 0.999341122700880, -0.00206388651740061, 0.0362361815232777,
    0.000660748031451783, 0.999250989651683, 0.0386913826603732,
    -0.0362888948713456, -0.0386419468010579, 0.998593969567432);                // rec: the rotation as a 3x3 matrix (Matlab reports it as the om vector)

/* 0.999341122700880    0.000660748031451783    -0.0362888948713456
-0.00206388651740061    0.999250989651683    -0.0386419468010579
0.0362361815232777    0.0386913826603732    0.998593969567432 */

//Mat T = (Mat_<double>(3, 1) << -48.4, 0.241, -0.0344);// T translation vector (alternative calibration)
//Mat rec = (Mat_<double>(3, 1) << -0.039, -0.04658, 0.00106);// rec rotation vector, Matlab om parameter (alternative calibration)
Mat R;// R: rotation matrix

      /***** stereo matching *****/
void stereo_match(int, void*)
{
    bm->setBlockSize(2 * blockSize + 5);     // SAD window size; 5 to 21 is a good range
    bm->setROI1(validROIL);
    bm->setROI2(validROIR);
    bm->setPreFilterCap(31);
    bm->setMinDisparity(0);  // minimum disparity, default 0, may be negative (int)
    bm->setNumDisparities(numDisparities * 16 + 16);// disparity range (max minus min disparity); must be a multiple of 16 (int)
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(uniquenessRatio);// uniquenessRatio mainly helps reject false matches
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setDisp12MaxDiff(-1);
    Mat disp, disp8;
    bm->compute(rectifyImageL, rectifyImageR, disp);// input images must be grayscale
    disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.));// the computed disparity is CV_16S (fixed point, scaled by 16)
    reprojectImageTo3D(disp, xyz, Q, true); // to get real distances, the X/W, Y/W, Z/W produced by reprojectImageTo3D must be multiplied by 16 (i.e. W divided by 16)
    xyz = xyz * 16;
    imshow("disparity", disp8);
}

/***** mouse callback *****/
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }

    switch (event)
    {
    case EVENT_LBUTTONDOWN:   // left mouse button pressed
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        //cout << origin << "in world coordinate is: " << xyz.at<Vec3f>(origin) << endl;
        point3 = xyz.at<Vec3f>(origin);
        //cout << "point3[0]:" << point3[0] << "point3[1]:" << point3[1] << "point3[2]:" << point3[2]<<endl;
        cout << "World coordinates:" << endl;
        cout << "x: " << point3[0] << "  y: " << point3[1] << "  z: " << point3[2] << endl;
        d = point3[0] * point3[0] + point3[1] * point3[1] + point3[2] * point3[2];
        d = sqrt(d);   // distance in mm
        // cout << "Distance: " << d << " mm" << endl;

        d = d / 10.0;   // mm -> cm
        cout << "Distance: " << d << " cm" << endl;

        // d = d / 1000.0;   // m
        // cout << "Distance: " << d << " m" << endl;
    
        break;
    case EVENT_LBUTTONUP:    // left mouse button released
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}

/***** main *****/
int main()
{
    /*
    stereo rectification
    */
    Rodrigues(rec, R); // Rodrigues transform (rotation matrix <-> vector)
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
        0, imageSize, &validROIL, &validROIR);
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
    /*
    open the camera
    */
    VideoCapture cap;

        cap.open(1);                             // open the camera; the built-in webcam is usually index 0 and an external camera index 1 (check the index in Device Manager)

        cap.set(CV_CAP_PROP_FRAME_WIDTH, 2560);  // set the capture width
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 720);  // set the capture height

        if (!cap.isOpened())                         // check whether the camera opened successfully
        {
            cout << "Failed to open the camera!" << endl;
            return -1;
        }

        Mat frame, frame_L, frame_R;
        cap >> frame;                                // grab one frame from the camera
        
        cout << "Painted ImageL" << endl;
        cout << "Painted ImageR" << endl;

        while (1) {
        
            double fScale = 0.5;                         // scale factor: the 2560*720 side-by-side frame is scaled down so it fits entirely on a low-resolution screen

            Size dsize = Size(frame.cols*fScale, frame.rows*fScale);
            Mat imagedst = Mat(dsize, CV_32S);

            resize(frame, imagedst, dsize);
            char image_left[200];
            char image_right[200];
            frame_L = imagedst(Rect(0, 0, 640, 360));  // left camera image from the scaled frame
        //    namedWindow("Video_L", 1);
        //    imshow("Video_L", frame_L);
            
            frame_R = imagedst(Rect(640, 0, 640, 360)); // right camera image from the scaled frame
    //        namedWindow("Video_R", 2);
//            imshow("Video_R", frame_R);
            cap >> frame;
            /*
            prepare the images (the frames captured above are used instead of files)
            */
            //rgbImageL = imread("image_left_1.jpg", CV_LOAD_IMAGE_COLOR);
            cvtColor(frame_L, grayImageL, CV_BGR2GRAY);
            //rgbImageR = imread("image_right_1.jpg", CV_LOAD_IMAGE_COLOR);
            cvtColor(frame_R, grayImageR, CV_BGR2GRAY);

        //    imshow("ImageL Before Rectify", grayImageL);
        //    imshow("ImageR Before Rectify", grayImageR);

            /*
            after remap, the left and right images are coplanar and row-aligned
            */
            remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
            remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);

            /*
            display the rectification result
            */
            Mat rgbRectifyImageL, rgbRectifyImageR;
            cvtColor(rectifyImageL, rgbRectifyImageL, CV_GRAY2BGR);  // back to 3 channels so colored lines can be drawn
            cvtColor(rectifyImageR, rgbRectifyImageR, CV_GRAY2BGR);

            // show each image separately
            //rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
            //rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
        //    imshow("ImageL After Rectify", rgbRectifyImageL);
        //    imshow("ImageR After Rectify", rgbRectifyImageR);

            // show both on one canvas
            Mat canvas;
            double sf;
            int w, h;
            sf = 600. / MAX(imageSize.width, imageSize.height);
            w = cvRound(imageSize.width * sf);
            h = cvRound(imageSize.height * sf);
            canvas.create(h, w * 2, CV_8UC3);   // note the number of channels

                                                // draw the left image onto the canvas
            Mat canvasPart = canvas(Rect(w * 0, 0, w, h));                                // take part of the canvas
            resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);     // resize the image to the size of canvasPart
            Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),                // the valid ROI scaled to canvas coordinates
                cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
            //rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);                      // draw a rectangle
        //    cout << "Painted ImageL" << endl;

            // draw the right image onto the canvas
            canvasPart = canvas(Rect(w, 0, w, h));                                      // the other part of the canvas
            resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
            Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
                cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
            //rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
        //    cout << "Painted ImageR" << endl;

            // draw horizontal lines to check the row alignment
            for (int i = 0; i < canvas.rows; i += 16)
                line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
            imshow("rectified", canvas);

            /*
            stereo matching
            */
            namedWindow("disparity", CV_WINDOW_AUTOSIZE);
            // trackbar for the SAD block size
            createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match);
            // trackbar for the uniqueness ratio
            createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
            // trackbar for the number of disparities
            createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
            // mouse callback: setMouseCallback(window name, callback, user data; usually 0)
            setMouseCallback("disparity", onMouse, 0);
            stereo_match(0, 0);

            waitKey(10);

        } // end of while loop
    return 0;
}

I hope this helps.
If you spot anything that could be improved, feel free to discuss.

Additional notes:

1. How are the world coordinates obtained?
1) x, y, z come from:
Vec3f point3;
point3 = xyz.at<Vec3f>(origin);
cout << "x: " << point3[0] << "  y: " << point3[1] << "  z: " << point3[2] << endl;

2) Taking the square root of the sum of the squares of x, y and z is simply the two-point distance formula: the distance between the point (0, 0, 0), the center of the stereo camera, and the point (x, y, z).
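In code form, the calculation in 1) and 2) boils down to a few lines. This is a minimal sketch with a hypothetical helper name, not a function from the programs above:

// Hedged sketch: Euclidean distance of the reprojected point from the camera
// origin (0, 0, 0), converted from millimetres to centimetres.
#include <opencv2/core.hpp>
using namespace cv;

float distanceCm(const Vec3f& p)          // p = xyz.at<Vec3f>(origin), in mm
{
    return static_cast<float>(norm(p)) / 10.0f;   // sqrt(x^2 + y^2 + z^2), mm -> cm
}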
