Hi @zaccariam
As the log says, the maximum input width the stereo node supports is 1280 px; your cameras are likely configured to a higher resolution than that.
Below is a script written for the SR, but it might work for your setup as well:
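For reference, if your sensors are 1080p (or higher) you can scale the camera output down to 1280x720 before feeding it to StereoDepth. A minimal sketch, assuming a 1080p color stereo pair on CAM_B/CAM_C (the sockets and resolution are assumptions about your setup, so adjust them to your device):

import depthai as dai

pipeline = dai.Pipeline()

# Assumed 1080p color stereo pair; adjust sockets/resolution to your device
camLeft = pipeline.create(dai.node.ColorCamera)
camLeft.setBoardSocket(dai.CameraBoardSocket.CAM_B)
camLeft.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camLeft.setIspScale(2, 3)          # 1920x1080 -> 1280x720
camLeft.setPreviewSize(1280, 720)  # keep the stereo input width at 1280 or below
camLeft.setInterleaved(False)

camRight = pipeline.create(dai.node.ColorCamera)
camRight.setBoardSocket(dai.CameraBoardSocket.CAM_C)
camRight.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRight.setIspScale(2, 3)
camRight.setPreviewSize(1280, 720)
camRight.setInterleaved(False)

stereo = pipeline.create(dai.node.StereoDepth)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_ACCURACY)
camLeft.preview.link(stereo.left)
camRight.preview.link(stereo.right)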
#!/usr/bin/env python3
import random
import time
from sys import maxsize
import cv2
import depthai as dai
import open3d as o3d
COLOR = True
lrcheck = True # Better handling for occlusions
extended = False # Closer-in minimum depth, disparity range is doubled
subpixel = True # Better accuracy for longer distance, fractional disparity 32-levels
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7
print("StereoDepth config options:")
print(" Left-Right check: ", lrcheck)
print(" Extended disparity:", extended)
print(" Subpixel: ", subpixel)
print(" Median filtering: ", median)
pipeline = dai.Pipeline()
colorLeft = pipeline.create(dai.node.ColorCamera)
colorLeft.setPreviewSize(288, 288)
colorLeft.setResolution(dai.ColorCameraProperties.SensorResolution.THE_720_P)
colorLeft.setBoardSocket(dai.CameraBoardSocket.CAM_B)
colorLeft.setInterleaved(False)
colorLeft.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
#colorLeft.setIspScale(1, 2)
colorRight = pipeline.create(dai.node.ColorCamera)
colorRight.setPreviewSize(288, 288)
colorRight.setBoardSocket(dai.CameraBoardSocket.CAM_C)
colorRight.setResolution(dai.ColorCameraProperties.SensorResolution.THE_720_P)
colorRight.setInterleaved(False)
colorRight.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
#colorRight.setIspScale(1, 2)
print(f'left Isp size = {colorLeft.getIspSize()}')
print(f'left resolution = {colorLeft.getResolutionSize()}')
print(f'left preview size = {colorLeft.getPreviewSize()}')
print(f'left still size = {colorLeft.getStillSize()}')
print(f'left video size = {colorLeft.getVideoSize()}')
print('===============================================')
print(f'right Isp size = {colorRight.getIspSize()}')
print(f'right resolution = {colorRight.getResolutionSize()}')
print(f'right preview size = {colorRight.getPreviewSize()}')
print(f'right still size = {colorRight.getStillSize()}')
print(f'right video size = {colorRight.getVideoSize()}')
print("\n\n")
stereo = pipeline.createStereoDepth()
#stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_ACCURACY)
stereo.initialConfig.setMedianFilter(median)
#stereo.setOutputSize(288, 288)
# stereo.initialConfig.setConfidenceThreshold(255)
stereo.setLeftRightCheck(lrcheck)
stereo.setExtendedDisparity(extended)
stereo.setSubpixel(subpixel)
colorLeft.preview.link(stereo.left)
colorRight.preview.link(stereo.right)
config = stereo.initialConfig.get()
##########################################################
config.postProcessing.speckleFilter.enable = False
config.postProcessing.speckleFilter.speckleRange = 50
config.postProcessing.temporalFilter.enable = True
config.postProcessing.spatialFilter.enable = True
config.postProcessing.spatialFilter.holeFillingRadius = 2
config.postProcessing.spatialFilter.numIterations = 1
config.postProcessing.thresholdFilter.maxRange = 2000
config.postProcessing.decimationFilter.decimationFactor = 1
##########################################################
stereo.initialConfig.set(config)
xout_depth = pipeline.createXLinkOut()
xout_depth.setStreamName("depth")
stereo.depth.link(xout_depth.input)
# xout_disparity = pipeline.createXLinkOut()
# xout_disparity.setStreamName('disparity')
# stereo.disparity.link(xout_disparity.input)
xout_colorize = pipeline.createXLinkOut()
xout_colorize.setStreamName("colorize")
xout_rect_left = pipeline.createXLinkOut()
xout_rect_left.setStreamName("rectified_left")
xout_rect_right = pipeline.createXLinkOut()
xout_rect_right.setStreamName("rectified_right")
if COLOR:
    colorLeft.preview.link(xout_colorize.input)
else:
    stereo.rectifiedRight.link(xout_colorize.input)
stereo.rectifiedLeft.link(xout_rect_left.input)
stereo.rectifiedRight.link(xout_rect_right.input)
class HostSync:
    def __init__(self):
        self.arrays = {}

    def add_msg(self, name, msg):
        if name not in self.arrays:
            self.arrays[name] = []
        # Add msg to array
        self.arrays[name].append({"msg": msg, "seq": msg.getSequenceNum()})

        synced = {}
        for name, arr in self.arrays.items():
            for i, obj in enumerate(arr):
                if msg.getSequenceNum() == obj["seq"]:
                    synced[name] = obj["msg"]
                    break
        # If all 4 streams (depth, colorize, rectified_left, rectified_right)
        # are synced, remove older msgs and return the synced set
        if len(synced) == 4:
            # Remove old msgs
            for name, arr in self.arrays.items():
                for i, obj in enumerate(arr):
                    if obj["seq"] < msg.getSequenceNum():
                        arr.remove(obj)
                    else:
                        break
            return synced
        return False
file_num = 0
with dai.Device(pipeline) as device:
    #device.setIrLaserDotProjectorBrightness(1200)
    qs = []
    qs.append(device.getOutputQueue("depth", maxSize=1, blocking=False))
    qs.append(device.getOutputQueue("colorize", maxSize=1, blocking=False))
    qs.append(device.getOutputQueue("rectified_left", maxSize=1, blocking=False))
    qs.append(device.getOutputQueue("rectified_right", maxSize=1, blocking=False))

    try:
        from projector_3d import PointCloudVisualizer
    except ImportError as e:
        raise ImportError(
            f"\033[1;5;31mError occurred when importing PCL projector: {e}. Try disabling the point cloud \033[0m "
        )

    calibData = device.readCalibration()
    if COLOR:
        w, h = colorLeft.getIspSize()
        print(f'Width = {w}, Height = {h}')
        intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, dai.Size2f(w, h))
    else:
        w, h = colorRight.getResolutionSize()
        intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, dai.Size2f(w, h))
    print(f'testing = {w}, {h}')
    pcl_converter = PointCloudVisualizer(intrinsics, w, h)

    serial_no = device.getMxId()
    sync = HostSync()
    depth_vis, color, rect_left, rect_right = None, None, None, None
    while True:
        for q in qs:
            new_msg = q.tryGet()
            if new_msg is not None:
                msgs = sync.add_msg(q.getName(), new_msg)
                if msgs:
                    depth = msgs["depth"].getFrame()
                    color = msgs["colorize"].getCvFrame()
                    rectified_left = msgs["rectified_left"].getCvFrame()
                    rectified_right = msgs["rectified_right"].getCvFrame()

                    depth_vis = cv2.normalize(depth, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
                    depth_vis = cv2.equalizeHist(depth_vis)
                    depth_vis = cv2.applyColorMap(depth_vis, cv2.COLORMAP_HOT)

                    # OpenCV uses BGR, so swap channels for display
                    bgr_image = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)
                    cv2.imshow("color", bgr_image)
                    cv2.imshow("rectified_left", rectified_left)
                    cv2.imshow("rectified_right", rectified_right)

                    # Convert to RGB for the point cloud projection
                    rgb = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
                    pcl_converter.rgbd_to_projection(depth, rgb)
                    pcl_converter.visualize_pcd()

        key = cv2.waitKey(1)
        if key == ord("s"):
            timestamp = str(int(time.time()))
            num = random.randrange(900)
            o3d.io.write_point_cloud(f"{file_num}.pcd", pcl_converter.pcl, compressed=True)
            print(f"number for save = {file_num}")
            file_num += 1
        elif key == ord("q"):
            break
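Pressing 's' writes the cloud to <file_num>.pcd in the working directory. If you want to sanity-check a saved file afterwards, you can load it back with Open3D on the host, for example:

import open3d as o3d

pcd = o3d.io.read_point_cloud("0.pcd")  # use whichever file number the script printed
print(pcd)                              # shows the number of points loaded
o3d.visualization.draw_geometries([pcd])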
Thanks,
Jaka