我正在使用4个分立相机(相同类型)的立体相机装备,但目前只需要一对(cam1和cam2) .
目的是校准这对立体相机并获取场景的3D信息。我在Visual Studio 2017中使用Python 3.6和OpenCV 3.4.3。
我拍摄了28张棋盘校准图案的图像,并使用标准的OpenCV程序单独校准摄像机以及立体摄像机 .
由于校准数据看起来很好并且失真校正工作正常,下一步是校正图像 .
这是事情变得怪异的地方 . 我花了最近3个星期来研究这个并阅读了很多,尝试了很多并且总是得到糟糕的结果 . 我使用cv2.stereoCalibrate(也尝试使用不同的标记,如不同主题中所建议的),cv2.stereoRectify(也使用不同的alpha值),cv2.initUndistortRectifyMap和cv2.remap用于实际重新映射图像(方法1) . 但结果永远不会如想要的那样 .
我最近设法得到了看起来确实被校正了的图像,它们实际上是用cv2.stereoRectifyUncalibrated校正的。因此我没有使用特征匹配点(因为SURF和SIFT属于nonfree模块……),而是采用了略有不同的方法:用我28张校准图像中校准图案的角点作为输入点。这种方法可行,但校正后的图像看起来并不完美。
下面是我的图像(这些都不是校准图像),这样你就能明白我在说什么:
original left and right images
undistorted images
rectified with method 1, alpha=1
rectified with method 1, alpha=0
rectified uncalibrated, best result I got by now
任何人都可以给我一个暗示我使用方法1的错误吗?我已经看到很多类似问题的帖子,但我在那里的评论中找不到解决方案 . 或者这是OpenCV中的错误?
或者有人知道如何改进未经校准的整改?
这是一个包含相关调用的代码段:
# imports
import numpy as np
import cv2
import glob
import argparse
import sys
import os
# Chessboard geometry: number of inner corners along each axis.
numEdgeX = 10
numEdgeY = 7

# Script exit status, reported to the caller on completion.
exitCode = 0

# Resolve the calibration-data directory relative to this script's
# real location: strip the trailing 17 characters of the script dir
# to reach the project root, then descend into CalibData\chess.
scriptDir = str(os.path.dirname(os.path.realpath(__file__)))
pathDir = scriptDir[:len(scriptDir) - 17]
pathCalib = pathDir + "\\CalibData" + "\\chess"
try:
    # Select the stereo pair to process (pair 1 = cam1/cam2).
    p = 1
    cal_path = pathCalib + "\\pair" + str(p)
    images_right = glob.glob(cal_path + '\RIGHT/*.bmp')
    images_left = glob.glob(cal_path + '\LEFT/*.bmp')
    images_left.sort()
    images_right.sort()

    # Termination criteria for sub-pixel corner refinement (criteria)
    # and for calibration refinement (criteria_cal).
    criteria = (cv2.TermCriteria_EPS +
                cv2.TermCriteria_MAX_ITER, 30, 0.001)
    criteria_cal = (cv2.TermCriteria_EPS +
                    cv2.TermCriteria_MAX_ITER, 30, 1e-5)

    # Prepare object points on the chessboard plane (z = 0), i.e.
    # (0,0,0); (1,0,0); ...; (numEdgeX-1, numEdgeY-1, 0), in units of
    # one square edge.
    objp = np.zeros((numEdgeX*numEdgeY, 3), np.float32)
    objp[:, :2] = np.mgrid[0:numEdgeX, 0:numEdgeY].T.reshape(-1, 2)

    objpoints = []    # 3d points in real world space
    imgpoints_l = []  # 2d points in image plane for calibration (left)
    imgpoints_r = []  # 2d points in image plane for calibration (right)

    for i, fname in enumerate(images_right):
        print(str(i+1) + " out of " + str(len(images_right)))
        img_l = cv2.imread(images_left[i])
        img_r = cv2.imread(images_right[i])
        # Convert to grayscale for corner detection.
        img_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
        img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners in both views.
        ret_l, corners_l = cv2.findChessboardCorners(img_l, (numEdgeX, numEdgeY), None)
        ret_r, corners_r = cv2.findChessboardCorners(img_r, (numEdgeX, numEdgeY), None)
        # BUGFIX: the original appended objp unconditionally but the
        # image points conditionally, so a single failed detection left
        # objpoints, imgpoints_l and imgpoints_r with different lengths
        # (and unpaired left/right views), which breaks calibrateCamera
        # and stereoCalibrate.  Keep a view only when the pattern was
        # found in BOTH images.
        if ret_l and ret_r:
            print("image " + str(i+1) + "left - io")
            cv2.cornerSubPix(img_l, corners_l, (11, 11),
                             (-1, -1), criteria)
            imgpoints_l.append(corners_l)
            print("image " + str(i+1) + "right - io")
            cv2.cornerSubPix(img_r, corners_r, (11, 11),
                             (-1, -1), criteria)
            imgpoints_r.append(corners_r)
            objpoints.append(objp)

    # Image size as (width, height): shape is (rows, cols), reversed.
    img_shape = img_l.shape[::-1]

    ### CALIBRATION ###
    # Calibrate each camera individually to get intrinsics (M) and
    # distortion coefficients (d).
    rt, M1, d1, r1, t1 = cv2.calibrateCamera(
        objpoints, imgpoints_l, img_shape, None, None)
    rt, M2, d2, r2, t2 = cv2.calibrateCamera(
        objpoints, imgpoints_r, img_shape, None, None)

    # Stereo calibration: keep the individually calibrated intrinsics
    # fixed and estimate only the relative pose (R, T) plus E and F.
    stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                            cv2.TERM_CRITERIA_EPS, 100, 1e-5)
    flags = (cv2.CALIB_FIX_PRINCIPAL_POINT | cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_FIX_FOCAL_LENGTH |
             cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5 |
             cv2.CALIB_FIX_K6)
    ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpoints_l,
        imgpoints_r, M1, d1, M2,
        d2, img_shape,
        criteria=stereocalib_criteria,
        flags=flags)

    # New optimal camera matrices (alpha = 0: keep only valid pixels).
    newCamMtx1, roi1 = cv2.getOptimalNewCameraMatrix(M1, d1, img_shape, 0, img_shape)
    newCamMtx2, roi2 = cv2.getOptimalNewCameraMatrix(M2, d2, img_shape, 0, img_shape)

    # Rectification and undistortion maps which can be used directly to
    # correct the stereo pair.
    (rectification_l, rectification_r, projection_l,
     projection_r, disparityToDepthMap, ROI_l, ROI_r) = cv2.stereoRectify(
        M1, d1, M2, d2, img_shape, R, T,
        None, None, None, None, None,
        alpha=0)  # alpha=1 no pixels lost, alpha=0 pixels lost
    leftMapX, leftMapY = cv2.initUndistortRectifyMap(
        M1, d1, rectification_l, projection_l,
        img_shape, cv2.CV_32FC1)
    rightMapX, rightMapY = cv2.initUndistortRectifyMap(
        M2, d2, rectification_r, projection_r,
        img_shape, cv2.CV_32FC1)

    ### UNCALIBRATED RECTIFICATION ###
    # Use the chessboard corners of the undistorted calibration images
    # as point correspondences (avoids the non-free SURF/SIFT matchers).
    imgpoints_l_undis = []
    imgpoints_r_undis = []
    for i, fname in enumerate(images_right):
        img_l = cv2.imread(images_left[i])
        img_r = cv2.imread(images_right[i])
        img_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
        img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
        # Undistort with the per-camera intrinsics.
        img_l_undis = cv2.undistort(img_l, M1, d1, None, newCamMtx1)
        img_r_undis = cv2.undistort(img_r, M2, d2, None, newCamMtx2)
        # Find the chessboard corners in the undistorted images.
        ret_l_undis, corners_l_undis = cv2.findChessboardCorners(img_l_undis, (numEdgeX, numEdgeY), None)
        ret_r_undis, corners_r_undis = cv2.findChessboardCorners(img_r_undis, (numEdgeX, numEdgeY), None)
        # BUGFIX: as above, only keep corners detected in BOTH views so
        # the two point lists stay index-aligned --
        # stereoRectifyUncalibrated treats points1[i]/points2[i] as a
        # correspondence; the original appended them independently.
        if ret_l_undis and ret_r_undis:
            rt_l = cv2.cornerSubPix(img_l_undis, corners_l_undis, (11, 11), (-1, -1), criteria)
            rt_r = cv2.cornerSubPix(img_r_undis, corners_r_undis, (11, 11), (-1, -1), criteria)
            for j in range(0, len(rt_l)):
                imgpoints_l_undis.append(rt_l[j][0, :])
                imgpoints_r_undis.append(rt_r[j][0, :])

    # Convert to the Nx2 arrays expected by stereoRectifyUncalibrated.
    imgpoints_l_undis = np.array(imgpoints_l_undis)
    imgpoints_r_undis = np.array(imgpoints_r_undis)

    # NOTE(review): F was estimated by stereoCalibrate from the ORIGINAL
    # (distorted) corner positions, while the points passed here come
    # from undistorted images, so strictly they obey a different
    # fundamental matrix.  Consider recomputing F from these points with
    # cv2.findFundamentalMat for a consistent result.
    ret, uncRectMtx1, uncRectMtx2 = cv2.stereoRectifyUncalibrated(
        imgpoints_l_undis, imgpoints_r_undis, F, img_shape)

    ### REMAPPING ###
    # Load the first image pair and convert to grayscale.
    img_l = cv2.imread(images_left[0])
    img_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
    img_l_undis = cv2.undistort(img_l, M1, d1, None, newCamMtx1)
    img_r = cv2.imread(images_right[0])
    img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
    img_r_undis = cv2.undistort(img_r, M2, d2, None, newCamMtx2)

    # Calibrated rectification: remap with the precomputed maps.
    imglCalRect = cv2.remap(img_l, leftMapX, leftMapY, cv2.INTER_LINEAR)
    imgrCalRect = cv2.remap(img_r, rightMapX, rightMapY, cv2.INTER_LINEAR)
    numpyHorizontalCalibRect = np.hstack((imglCalRect, imgrCalRect))

    # Uncalibrated rectification: warp the undistorted images with the
    # homographies returned by stereoRectifyUncalibrated.
    imglUncalRect = cv2.warpPerspective(img_l_undis, uncRectMtx1, img_shape)
    imgrUncalRect = cv2.warpPerspective(img_r_undis, uncRectMtx2, img_shape)
    numpyHorizontalUncalibRect = np.hstack((imglUncalRect, imgrUncalRect))

    ### SHOW RESULTS ###
    # Draw evenly spaced horizontal lines across both stacked views; in
    # a correctly rectified pair, corresponding features lie on the same
    # line in the left and right halves.
    lineThickness = 5
    lineColor = [0, 255, 0]
    numLines = 20
    # NOTE(review): img_shape is (width, height), so this spaces the
    # horizontal lines by width/numLines -- presumably img_shape[1]
    # (the height) was intended; kept as in the original.
    interv = round(img_shape[0] / numLines)
    for jj in range(0, numLines):
        # BUGFIX: cv2.line expects points as tuples of ints; the
        # original passed one-element float ndarrays.
        y = int(jj * interv)
        xEnd = int(3 * img_shape[1])
        cv2.line(numpyHorizontalCalibRect, (0, y), (xEnd, y),
                 lineColor, lineThickness)
        cv2.line(numpyHorizontalUncalibRect, (0, y), (xEnd, y),
                 lineColor, lineThickness)

    cv2.namedWindow("calibRect", cv2.WINDOW_NORMAL)
    cv2.namedWindow("uncalibRect", cv2.WINDOW_NORMAL)
    cv2.imshow("calibRect", numpyHorizontalCalibRect)
    cv2.imshow("uncalibRect", numpyHorizontalUncalibRect)
    cv2.waitKey()
except (IOError, ValueError):
    print("An I/O error or a ValueError occurred")
except:
    # Re-raised deliberately: report, then let the traceback propagate.
    print("An unexpected error occurred")
    raise
谢谢!
1 回答
解决了!问题在于OpenCV将我的装备解释为垂直立体相机系统,而我一直把它当作水平系统来看待。