OpenCV stereo camera calibration - strange results


I am trying to calibrate my Minoru stereo camera using OpenCV. The code below first calibrates the cameras and then rectifies the results. When I run it, the rectification output looks rather strange.
I have attached one of the results.
It would be very helpful if someone could take a look at my code.
Thanks, Max

Code:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/contrib/contrib.hpp>
#include "libcam.h"
#include <stdio.h>
#include <iostream>

using namespace cv;
using namespace std;

void loadImagePair(Mat &img1, Mat &img2, int i) {

    stringstream ss1, ss2;

    ss1 << "data/saves/" << i << "_1.png";
    ss2 << "data/saves/" << i << "_2.png";

    /*if (i < 10) {
     ss1 << "data/martinperris_images/left0" << i << ".ppm";
     ss2 << "data/martinperris_images/right0" << i << ".ppm";
     } else {
     ss1 << "data/martinperris_images/left" << i << ".ppm";
     ss2 << "data/martinperris_images/right" << i << ".ppm";
     }*/

    img1 = imread(ss1.str());
    img2 = imread(ss2.str());
}

int main(int argc, char** argv) {

    // The camera properties
    int w = 640;
    int h = 480;
    int fps = 20;

    // The chessboard properties
    Size chessboardSize(9, 6);
    float squareSize = 1.0f;

    // This should contain the physical location of each corner, but since we don't know them, we are assigning constant positions
    vector<vector<Point3f> > objPoints;
    // The chessboard corner points in the images
    vector<vector<Point2f> > imagePoints1, imagePoints2;
    vector<Point2f> corners1, corners2;

    // The constant positions of each obj points
    vector<Point3f> obj;
    for (int x = 0; x < chessboardSize.width; x++) {
        for (int y = 0; y < chessboardSize.height; y++) {
            obj.push_back(Point3f(x * squareSize, y * squareSize, 0));
        }
    }
    /*for (int i = 0; i < chessboardSize.width * chessboardSize.height; i++) {
     obj.push_back(Point3f(i / chessboardSize.width, i % chessboardSize.height, 0.0f));
     }*/

    // The images that will be processed
    Mat img1, img2;
    // The grayscale versions of the images
    Mat gray1, gray2;

    // Get the image count
    int imageCount;
    cout << "How much images to load: " << endl;
    cin >> imageCount;
    // The index of the current image pair (zero-based)
    int i = 0;
    // Whether the chessboard corners in the images were found
    bool found1 = false, found2 = false;

    while (i < imageCount) {

        // Load the images
        cout << "Attempting to load image pair " << i << endl;
        loadImagePair(img1, img2, i);
        cout << "Loaded image pair" << endl;

        // Convert to grayscale images
        cvtColor(img1, gray1, CV_BGR2GRAY);
        cvtColor(img2, gray2, CV_BGR2GRAY);

        // Find chessboard corners
        found1 = findChessboardCorners(img1, chessboardSize, corners1, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        found2 = findChessboardCorners(img2, chessboardSize, corners2, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

        cout << "found 1/2: " << found1 << "/" << found2 << endl;

        // Find corners to subpixel accuracy
        if (found1) {
            cornerSubPix(gray1, corners1, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray1, chessboardSize, corners1, found1);
        }
        if (found2) {
            cornerSubPix(gray2, corners2, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray2, chessboardSize, corners2, found2);
        }

        // Store corners
        if (found1 && found2) {
            imagePoints1.push_back(corners1);
            imagePoints2.push_back(corners2);
            objPoints.push_back(obj);
            cout << "Corners stored" << endl;
            i++;
        }
        // Error
        else {
            cout << "Corners not found! Stopping" << endl;
            return 0;
        }
    }

    cout << "Starting calibration" << endl;
    Mat CM1 = Mat(3, 3, CV_64F);
    Mat CM2 = Mat(3, 3, CV_64F);
    Mat D1 = Mat(1, 5, CV_64F);
    Mat D2 = Mat(1, 5, CV_64F);
    Mat R = Mat(3, 3, CV_64F);
    Mat T = Mat(3, 1, CV_64F);
    Mat E = Mat(3, 3, CV_64F);
    Mat F = Mat(3, 3, CV_64F);
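    // CM1/CM2: 3x3 intrinsic camera matrices, D1/D2: lens distortion coefficients,
    // R/T: rotation and translation from the first to the second camera,
    // E/F: essential and fundamental matrices estimated by stereoCalibrate()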
    //stereoCalibrate(objPoints, imagePoints1, imagePoints2, CM1, D1, CM2, D2, img1.size(), R, T, E, F,
    //CV_CALIB_SAME_FOCAL_LENGTH | CV_CALIB_ZERO_TANGENT_DIST, cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5));
    stereoCalibrate(objPoints, imagePoints1, imagePoints2, CM1, D1, CM2, D2, img1.size(), R, T, E, F, 0,
            cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5));
    cout << "Done calibration" << endl;

    cout << "Starting rectification" << endl;
    Mat R1 = Mat(3, 3, CV_64F);
    Mat R2 = Mat(3, 3, CV_64F);
    Mat P1 = Mat(3, 4, CV_64F);
    Mat P2 = Mat(3, 4, CV_64F);
    Mat Q = Mat(4, 4, CV_64F);
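    // R1/R2: rectification rotations for each camera, P1/P2: projection matrices
    // in the rectified coordinate system, Q: 4x4 disparity-to-depth mapping matrix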
    stereoRectify(CM1, D1, CM2, D2, img1.size(), R, T, R1, R2, P1, P2, Q);
    cout << "Done rectification" << endl;

    cout << "Starting to store results" << endl;
    FileStorage fs("stereocalib.yml", FileStorage::WRITE);
    fs << "CM1" << CM1;
    fs << "CM2" << CM2;
    fs << "D1" << D1;
    fs << "D2" << D2;
    fs << "R" << R;
    fs << "T" << T;
    fs << "E" << E;
    fs << "F" << F;
    fs << "R1" << R1;
    fs << "R2" << R2;
    fs << "P1" << P1;
    fs << "P2" << P2;
    fs << "Q" << Q;
    fs.release();
    cout << "Done storing results" << endl;

    cout << "Starting to apply undistort" << endl;
    Mat map1x = Mat(img1.size().height, img1.size().width, CV_32F);
    Mat map1y = Mat(img1.size().height, img1.size().width, CV_32F);
    Mat map2x = Mat(img2.size().height, img2.size().width, CV_32F);
    Mat map2y = Mat(img2.size().height, img2.size().width, CV_32F);
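    // Build the per-pixel rectification maps; remap() below uses them to warp
    // the original images into the row-aligned rectified geometry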
    initUndistortRectifyMap(CM1, D1, R1, P1, img1.size(), CV_32FC1, map1x, map1y);
    initUndistortRectifyMap(CM2, D2, R2, P2, img2.size(), CV_32FC1, map2x, map2y);
    cout << "Done applying undistort" << endl;

    // The rectified images
    Mat imgU1 = Mat(img1.size(), img1.type()), imgU2 = Mat(img2.size(), img2.type());

    // Show rectified images
    i = 0;
    while (i < imageCount) {

        // Load the images
        cout << "Attempting to load image pair " << i << endl;
        loadImagePair(img1, img2, i);
        cout << "Loaded image pair" << endl;
        i++;

        remap(img1, imgU1, map1x, map1y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
        remap(img2, imgU2, map2x, map2y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
        //remap(img1, imgU1, map1x, map1y, INTER_LINEAR, BORDER_DEFAULT);
        //remap(img2, imgU2, map2x, map2y, INTER_LINEAR, BORDER_DEFAULT);

        imshow("img1", img1);
        imshow("img2", img2);
        imshow("rec1", imgU1);
        imshow("rec2", imgU2);

        int key = waitKey(0);
        if (key == 'q') {
            break;
        }
    }

    return 0;
}

Image:
[screenshot of the strange rectification result]

1 Answer


    Finally I found out what the problem was. The images were fine, but I had defined the obj vector the wrong way round: the order of the 3D object points has to match the row-by-row order in which findChessboardCorners returns the 2D corners. It has to be:

    // The constant positions of each obj points
        vector<Point3f> obj;
        for (int y = 0; y < chessboardSize.height; y++) {
            for (int x = 0; x < chessboardSize.width; x++) {
                obj.push_back(Point3f(y * squareSize, x * squareSize, 0));
            }
        }
    

    Now my calibration gives a result of 1.57324, which I think is quite good.
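
    The value quoted above is presumably the return value of stereoCalibrate, which reports the final RMS reprojection error in pixels (values around one pixel or below are generally considered usable). A minimal sketch of how to capture it, simply wrapping the stereoCalibrate call already used in the question's code:

        // stereoCalibrate() returns the RMS reprojection error of the optimization
        double rms = stereoCalibrate(objPoints, imagePoints1, imagePoints2, CM1, D1, CM2, D2,
                img1.size(), R, T, E, F, 0,
                cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5));
        cout << "Stereo calibration RMS error: " << rms << endl;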
