I’m using my OAK-D LR camera along with a LiDAR sensor. I’m trying to compute the extrinsics between the two sensors. As part of this process I need the calibrated intrinsic matrix and 4 distortion coefficients. I think the intrinsic calibration matrix from the center camera based on the saved RGB image sizes is correct. However using the first 4 distortion coefficients to undistort the image make the image look really warped and wrong. Are the images from the camera already undistorted? If so is it correct to use all zeros as my distortion coefficients? If this just means they are bad parameters where to I go to find a calibration process (not using ROS)?
Distortion Parameters Incorrect
crs287
Likely the calibration is ok, but there are some crops in the pipeline which could make it so the intrinsics are wrong. Make sure you check your calibration file - luxonis/depthai-pythonblob/main/examples/calibration/calibration_dump.py and check the resolution specified for each sensor. That resolution is the one at which the camera was calibrated and for which the intrinsics/distortion parameters should be absolutely correct.
Thanks,
Jaka
Here is my print out for camera data. I've been using the first distortion coefficients (2.53, -2.96, -0.0, -0.0
)
I see it says height is 1200 and width 1920. So I probably need my images to be that size to work with the given intrinsic and distortion parameters?
Also which set of parameters belongs to which camera with this print off? I'm just running the calibration_dumpy.py
code and copy and pasted the user camera data portion of the print out.
"cameraData": [
[
0,
{
"cameraType": 0,
"distortionCoeff": [
2.5341603755950928,
-2.9557416439056396,
-0.0003312704211566597,
-0.0004033033037558198,
1.7105742692947388,
2.47755765914917,
-2.8600361347198486,
1.694322109222412,
0.0,
0.0,
0.0,
0.0,
0.003867920720949769,
0.001242463244125247
],
"extrinsics": {
"rotationMatrix": [
[
0.0,
0.0,
0.0
],
[
0.0,
0.0,
0.0
],
[
0.0,
0.0,
0.0
]
],
"specTranslation": {
"x": -0.0,
"y": -0.0,
"z": -0.0
},
"toCameraSocket": -1,
"translation": {
"x": 0.0,
"y": 0.0,
"z": 0.0
}
},
"height": 1200,
"intrinsicMatrix": [
[
1138.1864013671875,
0.0,
963.575439453125
],
[
0.0,
1137.58154296875,
611.5448608398438
],
[
0.0,
0.0,
1.0
]
],
"lensPosition": 0,
"specHfovDeg": 77.30000305175781,
"width": 1920
}
],
[
1,
{
"cameraType": 0,
"distortionCoeff": [
4.8970136642456055,
37.852840423583984,
-0.0001479593920521438,
-0.00028254144126549363,
-4.79265022277832,
4.97584342956543,
37.25578689575195,
-3.5204930305480957,
0.0,
0.0,
0.0,
0.0,
-0.0033813314512372017,
-0.002395808231085539
],
"extrinsics": {
"rotationMatrix": [
[
0.9999895095825195,
-0.002153374720364809,
-0.0040421574376523495
],
[
0.0021413934882730246,
0.9999933242797852,
-0.0029660467989742756
],
[
0.004048517439514399,
0.0029573598876595497,
0.9999874234199524
]
],
"specTranslation": {
"x": -15.0,
"y": 0.0,
"z": 0.0
},
"toCameraSocket": 2,
"translation": {
"x": -15.033685684204102,
"y": -0.010391591116786003,
"z": -0.1848541498184204
}
},
"height": 1200,
"intrinsicMatrix": [
[
1145.872314453125,
0.0,
956.6023559570312
],
[
0.0,
1144.760009765625,
565.30810546875
],
[
0.0,
0.0,
1.0
]
],
"lensPosition": 0,
"specHfovDeg": 77.30000305175781,
"width": 1920
}
],
[
2,
{
"cameraType": 0,
"distortionCoeff": [
8.443777084350586,
27.807140350341797,
0.0004656983073800802,
0.0002269611431984231,
-1.952384114265442,
8.491683006286621,
27.2432918548584,
-0.9128970503807068,
0.0,
0.0,
0.0,
0.0,
-0.00248124310746789,
-0.003077663714066148
],
"extrinsics": {
"rotationMatrix": [
[
0.9997866153717041,
0.0009325880673713982,
0.020635638386011124
],
[
-0.0007558906218037009,
0.9999629855155945,
-0.008568876422941685
],
[
-0.02064286731183529,
0.00855144951492548,
0.9997503161430359
]
],
"specTranslation": {
"x": 5.0,
"y": 0.0,
"z": 0.0
},
"toCameraSocket": 0,
"translation": {
"x": 5.043720722198486,
"y": -0.0019076529424637556,
"z": -0.027802081778645515
}
},
"height": 1200,
"intrinsicMatrix": [
[
1146.215576171875,
0.0,
942.7471923828125
],
[
0.0,
1145.73974609375,
592.1021728515625
],
[
0.0,
0.0,
1.0
]
],
"lensPosition": 0,
"specHfovDeg": 77.30000305175781,
"width": 1920
}
]
],
The distortion parameters are definitely off or the image has already been undistorted, because using OpenCV's standard undistort method with these parameters I get the following warped image.
Original Image:
Undistorted Image:
Here's my undistort script:
import cv2
import numpy as np
import time
from datetime import datetime, timezone
import depthai as dai
# OAK-D Thread
def oak_d_thread(pipeline, data_dir):
# 1. Capture "monotonic now" (time since host PC boot, unaffected by system clock changes)
monotonic_ref = dai.Clock.now() # Returns datetime.timedelta
# 2. Capture "UTC now" at the same instant
utc_ref = datetime.now(timezone.utc) # A naive datetime in UTC
# 3. Compute offset: (UTC time) - (monotonic time)
offset = utc_ref - monotonic_ref
frame_cnt = 0
with dai.Device(pipeline) as device:
calib_data = device.readCalibration2()
## Get Intrinsics for a given camera [fx,0,cx; 0,fy,cy; 0,0,1]
intrinsics = calib_data.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A)
K = np.asarray(intrinsics)
dist_coeff = calib_data.getDistortionCoefficients(dai.CameraBoardSocket.CAM_A)
dist = np.asarray(dist_coeff)[:4]
q_rgb = device.getOutputQueue("rgb")
while True:
in_rgb = q_rgb.tryGet()
if in_rgb is not None:
if frame_cnt == 0:
frame_cnt += 1
continue
frame = in_rgb.getCvFrame()
cam_deltatime = in_rgb.getTimestamp()
cam_time = cam_deltatime + offset
print(f"Camera: Timestamp {cam_time}")
# Undistort
mapx, mapy = cv2.initUndistortRectifyMap(K, dist, None, K, (frame.shape[1],frame.shape[0]), 5)
undistorted_img = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
# Save Camera Image
cv2.imwrite(f"{data_dir}/oakd_cam/calib/img_{cam_time.timestamp()}.jpg", frame)
cv2.imwrite(f"{data_dir}/oakd_cam/calib/undistorted_img_{cam_time.timestamp()}.jpg", undistorted_img)
frame_cnt += 1
# if frame_cnt > 50:
# break
# Main Program
if __name__ == "__main__":
data_dir = '/data/'
pipeline = dai.Pipeline()
cam_rgb = pipeline.createColorCamera()
cam_rgb.setFps(0.5)
cam_rgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
cam_rgb.setPreviewSize(1920,1200)#(1024, 512)
cam_rgb.setInterleaved(False)
cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)
# Threads for both devices
oak_d_thread(pipeline, data_dir)
print("Finished processing LiDAR and Camera!")
Using all 14 coefficients did seem to work better, thank you! I assumed that it would work with only 4 since OpenCV mentions that their undistort methods work with 4, 5, 8, 12, or 14 distortion coefficients.