Hey WalterWohlkinger - yes, this is possible in v3.2.1 on RVC2. setISP(...) from v2 maps to Camera.requestOutput(...), and decimation stays in StereoDepth config.
import depthai as dai

# Target stream sizes and frame rate for both cameras.
FPS = 30.0
COLOR_SIZE = (704, 396)  # target color output resolution
DEPTH_SIZE = (160, 100)  # target depth resolution (after decimation/resize)

# NOTE: indentation restored — the pasted snippet had lost the `with`/`while`
# block structure and would not parse as-is.
with dai.Pipeline() as p:
    # One Camera node per physical sensor socket (A = RGB, B/C = stereo pair).
    camRgb = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
    left = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
    right = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)
    stereo = p.create(dai.node.StereoDepth)

    # Stereo input size (640x400 -> decimation factor 4 => 160x100).
    left.requestOutput((640, 400), fps=FPS).link(stereo.left)
    right.requestOutput((640, 400), fps=FPS).link(stereo.right)

    # v2 decimation equivalent: drop resolution inside StereoDepth itself.
    stereo.initialConfig.postProcessing.decimationFilter.decimationFactor = 4
    stereo.initialConfig.postProcessing.decimationFilter.decimationMode = (
        dai.StereoDepthConfig.PostProcessing.DecimationFilter.DecimationMode.PIXEL_SKIPPING
    )

    # v2 setISP(...) equivalent in v3: request a scaled/cropped color output
    # directly from the Camera node.
    colorOut = camRgb.requestOutput(
        size=COLOR_SIZE,
        type=dai.ImgFrame.Type.RGB888i,
        resizeMode=dai.ImgResizeMode.CROP,
        fps=FPS,
        enableUndistortion=True,
    )

    # Align depth to the color camera's viewpoint.
    colorOut.link(stereo.inputAlignTo)

    # Force the depth output size (decoupled from the color size).
    stereo.setOutputSize(*DEPTH_SIZE)
    stereo.setOutputKeepAspectRatio(False)

    # Host-side queues must be created before the pipeline starts.
    qColor = colorOut.createOutputQueue()
    qDepth = stereo.depth.createOutputQueue()

    p.start()
    while p.isRunning():
        c = qColor.get()
        d = qDepth.get()
        print(f"color={c.getWidth()}x{c.getHeight()} depth={d.getWidth()}x{d.getHeight()}")
Got this output:
color=704x396 depth=160x100 (as expected).
If you use the RGBD node for a colored point cloud, color and depth must have the same size; for mixed sizes, use separate color and depth streams as shown above.
Thanks,
Oskar