A few days ago I started looking into stereo vision, and I'm having some trouble getting a disparity map. This is the source code I'm using:

import numpy as np
import cv2

print('loading images...')
imgL = cv2.imread('imgL.png',0)  # downscale images for faster processing if you like
imgR = cv2.imread('imgR.png',0)

# SGBM Parameters -----------------
window_size = 5                      # 3, 5 or 7 for downscaled images; ~15 for full-size images (1300 px wide and up); 5 works nicely here

left_matcher = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=32,               # must be divisible by 16, e.g. 192 or 256 for larger scenes
    blockSize=3,
    P1=8 * 5 * window_size ** 2,     # smoothness penalties (P2 > P1)
    P2=16 * 5 * window_size ** 2,
    disp12MaxDiff=1,
    uniquenessRatio=15,
    speckleWindowSize=0,
    speckleRange=2,
    preFilterCap=63,
)

right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)

# FILTER Parameters
lmbda = 80000
sigma = 1.2

wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)

displ = left_matcher.compute(imgL, imgR)   # left-view disparity, fixed point (true disparity * 16)
dispr = right_matcher.compute(imgR, imgL)  # right-view disparity, needed by the WLS filter
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, imgL, None, dispr)

filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, alpha=255, beta=0, norm_type=cv2.NORM_MINMAX)  # stretch to 0..255 for display
filteredImg = np.uint8(filteredImg)

filteredImg = cv2.applyColorMap(filteredImg, cv2.COLORMAP_JET)

cv2.imshow('Disparity Map', filteredImg)

cv2.imshow('camL', imgL)
cv2.imshow('camR', imgR)

cv2.waitKey(0)
cv2.destroyAllWindows()
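
A small sanity check I added on top of this (just a sketch, not part of the original snippet): as far as I understand, StereoSGBM returns the disparity multiplied by 16 and marks unmatched pixels with (minDisparity - 1) * 16, so converting back to pixel units and printing the range shows whether numDisparities = 32 actually covers the scene.

def disparity_stats(disp_fixed, min_disp=0):
    # SGBM output is the true disparity times 16; unmatched pixels
    # are set to (min_disp - 1) * 16, i.e. -16 here
    disp = disp_fixed.astype(np.float32) / 16.0
    valid = disp > (min_disp - 1)
    print('valid pixels: %.1f%%' % (100.0 * valid.mean()))
    if valid.any():
        print('disparity range: %.1f .. %.1f px' % (disp[valid].min(), disp[valid].max()))

# disparity_stats(displ)   # e.g. call right after left_matcher.compute(...)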

This code works perfectly with dataset images, but when I use my own images it simply doesn't work properly, even after trying hundreds of combinations of block size and number of disparities...

Dataset Image - My image

So this leads me to believe the problem has something to do with the calibration and rectification of the stereo pair... For the calibration, I aligned the cameras as well as I could and took a set of 47 chessboard pictures in several different positions, and with the code below I performed the calibration and rectification:

import numpy as np
import cv2
import glob

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)   # termination criteria for cornerSubPix

objp = np.zeros((5*6, 3), np.float32)              # the board has 5x6 inner corners
objp[:, :2] = np.mgrid[0:5, 0:6].T.reshape(-1, 2)

objpoints = []    # 3D points in chessboard coordinates
imgpoints = []    # 2D corners in the left images
imgpoints2 = []   # 2D corners in the right images

L = sorted(glob.glob('L/*.png'))   # sort so L[i] and R[i] come from the same shot
R = sorted(glob.glob('R/*.png'))
cont=0

print ("Checking images...")

while(cont < len(L)):

    imgL = cv2.imread(L[cont])
    grayL = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
    retL, cornersL = cv2.findChessboardCorners(grayL, (5,6),None)

    if retL == True:

        imgR = cv2.imread(R[cont])
        grayR = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
        retR, cornersR = cv2.findChessboardCorners(grayR, (5,6),None)

        if retR == True:

            objpoints.append(objp)

            cornersL = cv2.cornerSubPix(grayL,cornersL,(11,11),(-1,-1),criteria)
            imgpoints.append(cornersL)

            cornersR = cv2.cornerSubPix(grayR,cornersR,(11,11),(-1,-1),criteria)
            imgpoints2.append(cornersR)

            showL = cv2.drawChessboardCorners(imgL, (5,6), cornersL,retL)
            showR = cv2.drawChessboardCorners(imgR, (5,6), cornersR, retR)

            showL = cv2.resize(showL, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_LANCZOS4)
            showR = cv2.resize(showR, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_LANCZOS4)

        cont += 1

cv2.destroyAllWindows()

print("Calculating...")

ret, mtx1, dist1, rvecs1, tvecs1 = cv2.calibrateCamera(objpoints, imgpoints,  grayL.shape[::-1], None, None)
ret, mtx2, dist2, rvecs2, tvecs2 = cv2.calibrateCamera(objpoints, imgpoints2, grayR.shape[::-1], None, None)

stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
stereocalib_flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH)
stereocalib_retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    objpoints, imgpoints, imgpoints2, mtx1, dist1, mtx2, dist2,
    grayL.shape[::-1],               # image size as (width, height); must match the size used below
    criteria=stereocalib_criteria, flags=stereocalib_flags)

rectify_scale = 0   # alpha: 0 = crop to valid pixels only, 1 = keep all source pixels
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, grayL.shape[::-1], R, T, alpha=rectify_scale)
left_maps  = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, grayL.shape[::-1], cv2.CV_16SC2)
right_maps = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, grayL.shape[::-1], cv2.CV_16SC2)

left_img  = cv2.imread('L/L6.png')
right_img = cv2.imread('R/R6.png')

left_img_remap  = cv2.remap(left_img, left_maps[0], left_maps[1], cv2.INTER_LANCZOS4)
right_img_remap = cv2.remap(right_img, right_maps[0], right_maps[1], cv2.INTER_LANCZOS4)

left_img_remap = cv2.resize(left_img_remap, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_LANCZOS4)
right_img_remap = cv2.resize(right_img_remap, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_LANCZOS4)

cv2.imshow("Calibrated Left", left_img_remap)
cv2.imshow("Calibrated Right", right_img_remap)

cv2.imwrite("left.png",left_img_remap)
cv2.imwrite("right.png",right_img_remap)

cv2.waitKey(0)
cv2.destroyAllWindows()
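
To judge the alignment with something more objective than my eyes (again just a sketch, reusing the left.png / right.png files written above): in a properly rectified pair the same scene point should land on the same image row in both views, so drawing horizontal lines across the side-by-side pair makes any vertical offset visible. Printing stereocalib_retval (the RMS reprojection error returned by cv2.stereoCalibrate) is probably also worth doing; values far above one pixel usually mean the calibration itself is the problem.

check_left  = cv2.imread('left.png')
check_right = cv2.imread('right.png')

both = np.hstack((check_left, check_right))          # rectified pair side by side
for y in range(0, both.shape[0], 40):                # one horizontal line every 40 rows
    cv2.line(both, (0, y), (both.shape[1] - 1, y), (0, 255, 0), 1)

cv2.imshow('Epipolar check', both)
cv2.waitKey(0)
cv2.destroyAllWindows()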

The results look well aligned, as you can see below... so I'm not sure what is wrong here. Can someone help me?

Rectify
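
For reference, a minimal sketch of how I would connect the two scripts (myL.png / myR.png are placeholder names; left_maps / right_maps are assumed to still be in scope from the calibration script above): any new pair captured with the same rig has to be remapped with the same rectification maps before it is handed to the SGBM matcher from the first script.

newL = cv2.imread('myL.png', 0)
newR = cv2.imread('myR.png', 0)

rectL = cv2.remap(newL, left_maps[0],  left_maps[1],  cv2.INTER_LANCZOS4)
rectR = cv2.remap(newR, right_maps[0], right_maps[1], cv2.INTER_LANCZOS4)

# rectL / rectR now take the place of imgL / imgR in the disparity script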