@jakaskerl
My last question for now is about the camera intrinsic and extrinsic values. I know the extrinsics are retrieved from calibration and do not affect the focal length or FOV; they describe the camera's location and orientation. The intrinsics, in contrast, can be retrieved with the Python script you have provided, and they depend on how the camera captures images: parameters such as focal length, aperture, field of view, and resolution govern the intrinsic matrix of the camera model. Do the intrinsic or extrinsic values need to be in the script that captures the point cloud (the .pcd or .ply file), or do they need to be implemented in the script that does the ICP or global registration?
I assume the ToF-RGB alignment is not affected by these values, but I am unsure whether they affect how the camera captures the point cloud or the depth.
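To make sure I am asking this clearly, here is a rough sketch (with made-up values, not my actual data) of where I currently believe each set of values would enter an Open3D workflow; if this mental model is wrong, please correct me:

import numpy as np
import open3d as o3d

# Made-up intrinsics, only to illustrate the shape of the values in question.
intrinsic = o3d.camera.PinholeCameraIntrinsic(width=640, height=480,
                                              fx=494.35, fy=499.48,
                                              cx=321.85, cy=218.30)

# (a) Capture side: intrinsics matter when back-projecting a depth image into a
#     point cloud yourself. A synthetic depth frame stands in for a real capture.
depth = o3d.geometry.Image(np.full((480, 640), 1500, dtype=np.uint16))
pcd = o3d.geometry.PointCloud.create_from_depth_image(depth, intrinsic,
                                                      depth_scale=1000.0)

# (b) Registration side: ICP only consumes the finished point clouds plus an
#     initial guess; a calibrated 4x4 extrinsic could serve as that initial guess.
init = np.eye(4)  # stand-in for a real extrinsic
result = o3d.pipelines.registration.registration_icp(
    pcd, pcd, max_correspondence_distance=0.02, init=init)
print(result.transformation)

In other words, my current understanding is that the intrinsics matter when depth is turned into points, while ICP itself never sees them.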
Python script from here:
For testing purposes I have added my intrinsic values and a pinhole intrinsic (the fx/fy/cx/cy block below) to see if my point cloud would look any different.
import depthai as dai
import numpy as np
import cv2
import time
from datetime import timedelta
import datetime
import os
import sys
try:
    import open3d as o3d
except ImportError:
    sys.exit("Critical dependency missing: Open3D. Please install it using the command: '{} -m pip install open3d' and then rerun the script.".format(sys.executable))
FPS = 30
RGB_SOCKET = dai.CameraBoardSocket.CAM_C
TOF_SOCKET = dai.CameraBoardSocket.CAM_A
ALIGN_SOCKET = RGB_SOCKET
pipeline = dai.Pipeline()
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
tof = pipeline.create(dai.node.ToF)
camTof = pipeline.create(dai.node.Camera)
sync = pipeline.create(dai.node.Sync)
align = pipeline.create(dai.node.ImageAlign)
out = pipeline.create(dai.node.XLinkOut)
pointcloud = pipeline.create(dai.node.PointCloud)
# Camera intrinsic parameters (ensure I am using the correct calibration values)
fx = 494.35192765 # Update with my calibrated value
fy = 499.48351759 # Update with my calibrated value
cx = 321.84779556 # Update with my calibrated value
cy = 218.30442303 # Update with my calibrated value
intrinsic = o3d.camera.PinholeCameraIntrinsic(width=640, height=480, fx=fx, fy=fy, cx=cx, cy=cy)
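# NOTE: the Open3D 'intrinsic' object above is built here for testing, but it is
# not passed to the DepthAI pipeline anywhere below; as far as I understand, the
# on-device PointCloud node uses the calibration stored on the device itself.
# If I wanted the calibrated values at runtime, I believe it would look roughly
# like this (sketch, to be run inside the 'with dai.Device(pipeline) as device:' block):
#   calibData = device.readCalibration()
#   M = calibData.getCameraIntrinsics(RGB_SOCKET, 640, 480)  # 3x3 matrix (nested lists)
#   fx, fy, cx, cy = M[0][0], M[1][1], M[0][2], M[1][2]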
# ToF settings
camTof.setFps(FPS)
camTof.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)
camTof.setBoardSocket(TOF_SOCKET)
tofConfig = tof.initialConfig.get()
# Choose a median filter or use none; the median filter improves the point cloud
# but causes discretization of the data
tofConfig.median = dai.MedianFilter.KERNEL_7x7
# tofConfig.median = dai.MedianFilter.KERNEL_5x5
tof.initialConfig.set(tofConfig)
# rgb settings
camRgb.setBoardSocket(RGB_SOCKET)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P)
camRgb.setFps(FPS)
camRgb.setIspScale(3,4)
depthSize = (1280,800) #PLEASE SET TO BE SIZE OF THE TOF STREAM
rgbSize = camRgb.getIspSize()
out.setStreamName("out")
sync.setSyncThreshold(timedelta(seconds=(0.5 / FPS)))
# Linking
camRgb.isp.link(sync.inputs["rgb"])
camTof.raw.link(tof.input)
tof.depth.link(align.input)
# align.outputAligned.link(sync.inputs["depth_aligned"])
align.outputAligned.link(pointcloud.inputDepth)
sync.inputs["rgb"].setBlocking(False)
camRgb.isp.link(align.inputAlignTo)
pointcloud.outputPointCloud.link(sync.inputs["pcl"])
sync.out.link(out.input)
def colorizeDepth(frameDepth):
    invalidMask = frameDepth == 0
    try:
        minDepth = np.percentile(frameDepth[frameDepth != 0], 3)
        maxDepth = np.percentile(frameDepth[frameDepth != 0], 95)
        logDepth = np.log(frameDepth, where=frameDepth != 0)
        logMinDepth = np.log(minDepth)
        logMaxDepth = np.log(maxDepth)
        np.nan_to_num(logDepth, copy=False, nan=logMinDepth)
        logDepth = np.clip(logDepth, logMinDepth, logMaxDepth)
        depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255))
        depthFrameColor = np.nan_to_num(depthFrameColor)
        depthFrameColor = depthFrameColor.astype(np.uint8)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET)
        depthFrameColor[invalidMask] = 0
    except IndexError:
        depthFrameColor = np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8)
    except Exception as e:
        raise e
    return depthFrameColor
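# (Note: colorizeDepth is never called below, since the aligned-depth branch of
# the sync node is commented out in the linking section above.)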
# Blend weights for an RGB/depth overlay (defined here but not used anywhere below)
rgbWeight = 0.4
depthWeight = 0.6
def updateBlendWeights(percentRgb):
    global depthWeight
    global rgbWeight
    rgbWeight = float(percentRgb) / 100.0
    depthWeight = 1.0 - rgbWeight
with dai.Device(pipeline) as device:
    isRunning = True
    q = device.getOutputQueue(name="out", maxSize=4, blocking=False)
    vis = o3d.visualization.VisualizerWithKeyCallback()
    vis.create_window()
    pcd = o3d.geometry.PointCloud()
    coordinateFrame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1000, origin=[0, 0, 0])
    vis.add_geometry(coordinateFrame)
    first = True
    view_control = vis.get_view_control()
    while isRunning:
        inMessage = q.get()
        inColor = inMessage["rgb"]
        inPointCloud = inMessage["pcl"]
        cvColorFrame = inColor.getCvFrame()
        cvRGBFrame = cv2.cvtColor(cvColorFrame, cv2.COLOR_BGR2RGB)
        cv2.imshow("color", cvColorFrame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        if key == ord('c'):
            print("saving...")
            current_time = datetime.datetime.now()
            formatted_time = current_time.strftime("%Y_%m_%d_%H_%M_%S")
            new_output = formatted_time
            os.mkdir(new_output)
            o3d.io.write_point_cloud(os.path.join(new_output, "tof_pointcloud.ply"), pcd)
            cv2.imwrite(os.path.join(new_output, "Image_of_material.png"), cvColorFrame)
            print(f"RGB point cloud saved to folder {new_output}")
        if inPointCloud:
            points = inPointCloud.getPoints().astype(np.float64)
            points[:, 1] = -points[:, 1]  # Invert Y axis
            pcd.points = o3d.utility.Vector3dVector(points)
            colors = (cvRGBFrame.reshape(-1, 3) / 255.0).astype(np.float64)
            pcd.colors = o3d.utility.Vector3dVector(colors)
            if first:
                vis.add_geometry(pcd)
                first = False
            else:
                vis.update_geometry(pcd)
        vis.poll_events()
        vis.update_renderer()
    vis.destroy_window()
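For context, this is roughly how I intend to consume the saved .ply files afterwards (the file paths and voxel size are placeholders, not my real setup); note that nothing in it takes an intrinsic or extrinsic matrix, which is part of why I am unsure where those values belong:

import open3d as o3d

# Placeholder paths to two captures saved by the script above.
source = o3d.io.read_point_cloud("capture_a/tof_pointcloud.ply")
target = o3d.io.read_point_cloud("capture_b/tof_pointcloud.ply")

voxel = 10.0  # assuming the ToF points are in millimetres
src_down = source.voxel_down_sample(voxel)
tgt_down = target.voxel_down_sample(voxel)
for p in (src_down, tgt_down):
    p.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=voxel * 2, max_nn=30))

src_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
    src_down, o3d.geometry.KDTreeSearchParamHybrid(radius=voxel * 5, max_nn=100))
tgt_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
    tgt_down, o3d.geometry.KDTreeSearchParamHybrid(radius=voxel * 5, max_nn=100))

# Global registration (RANSAC over FPFH correspondences), then ICP refinement.
coarse = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
    src_down, tgt_down, src_fpfh, tgt_fpfh, True, voxel * 1.5,
    o3d.pipelines.registration.TransformationEstimationPointToPoint(False), 3,
    [o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(voxel * 1.5)],
    o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999))
fine = o3d.pipelines.registration.registration_icp(
    src_down, tgt_down, voxel * 0.4, coarse.transformation,
    o3d.pipelines.registration.TransformationEstimationPointToPlane())
print(fine.transformation)

My assumption is that the intrinsics have already done their work by the time the .ply exists, so only the capture side would ever need them, but I would appreciate confirmation.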