• DepthAI
  • how to preview and save rectifiedLeft and rectifiedRight images

I tried to preview and save synchronized rectifiedLeft/rectifiedRight images and got stuck on this error:

RuntimeError: Queue for stream name 'rectifiedLeft' doesn't exist

My simple preview code looks like this:

from depthai_sdk import Previews
from depthai_sdk.managers import PipelineManager, PreviewManager
import depthai as dai
import cv2

pm = PipelineManager()
pm.createColorCam(xout=True, previewSize=(1280, 800))
pm.createLeftCam(xout=True)
pm.createRightCam(xout=True)

with dai.Device(pm.pipeline) as device:
    pv = PreviewManager(display=[Previews.color.name, Previews.rectifiedLeft.name, Previews.rectifiedRight.name], mouseTracker=True)
    pv.createQueues(device)

    while True:
        pv.prepareFrames()
        pv.showFrames()

        if cv2.waitKey(1) == ord('q'):
            break

Could anyone help me fix this error?

Also, how would I add the part that saves the left/right images?

Please help. Thanks.

It turns out the rectifiedLeft/rectifiedRight streams only exist when the pipeline actually contains a stereo node, so I needed to build the pipeline myself with dai.node.StereoDepth and link its rectified outputs to XLinkOut nodes, as below:

#!/usr/bin/env python3

from pathlib import Path
import cv2
import depthai as dai

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
depth = pipeline.create(dai.node.StereoDepth)
xoutL = pipeline.create(dai.node.XLinkOut)
xoutR = pipeline.create(dai.node.XLinkOut)
xoutL.setStreamName("rectL")
xoutR.setStreamName("rectR")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Linking
monoLeft.out.link(depth.left)
monoRight.out.link(depth.right)
depth.rectifiedLeft.link(xoutL.input)
depth.rectifiedRight.link(xoutR.input)


frameno = 0
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the rectified frames from the outputs defined above
    qRight = device.getOutputQueue(name="rectR", maxSize=4, blocking=False)
    qLeft  = device.getOutputQueue(name="rectL", maxSize=4, blocking=False)
    
    dirNameL = "mono_data/left"
    Path(dirNameL).mkdir(parents=True, exist_ok=True)
    dirNameR = "mono_data/right"
    Path(dirNameR).mkdir(parents=True, exist_ok=True)

    while True:
        inRight = qRight.get()  # Blocking call, will wait until new data has arrived
        inLeft  = qLeft.get()
        
        # getCvFrame() converts the flat frame data into an HxW array ready to be shown
        cv2.imshow("right", inRight.getCvFrame())
        cv2.imshow("left", inLeft.getCvFrame())

        # After showing, each frame is also saved into its target directory as a PNG image
        cv2.imwrite(f"{dirNameR}/{frameno}.png", inRight.getFrame())
        cv2.imwrite(f"{dirNameL}/{frameno}.png", inLeft.getFrame())
        frameno += 1
        
        if cv2.waitKey(1) == ord('q'):
            break
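
One caveat on the "synchronized" part: qLeft.get() and qRight.get() simply take whatever arrives next on each queue, so the two frames are not guaranteed to come from the same capture moment. If you need strictly paired frames, you can match them by sequence number (every dai.ImgFrame carries one via getSequenceNum()). Below is a minimal sketch of that idea, written as a drop-in replacement for the while loop above; it reuses the qLeft/qRight queues and dirNameL/dirNameR directories already defined, files are named by sequence number instead of frameno, and the buffer size of 8 is just an arbitrary choice:

    # Buffers of frames still waiting for their partner, keyed by sequence number
    pendingL = {}
    pendingR = {}

    while True:
        # Non-blocking reads; tryGet() returns None when nothing new has arrived
        inLeft = qLeft.tryGet()
        if inLeft is not None:
            pendingL[inLeft.getSequenceNum()] = inLeft
        inRight = qRight.tryGet()
        if inRight is not None:
            pendingR[inRight.getSequenceNum()] = inRight

        # Show/save every sequence number that is present on both sides
        for seq in sorted(set(pendingL) & set(pendingR)):
            left = pendingL.pop(seq)
            right = pendingR.pop(seq)
            cv2.imshow("left", left.getCvFrame())
            cv2.imshow("right", right.getCvFrame())
            cv2.imwrite(f"{dirNameL}/{seq}.png", left.getFrame())
            cv2.imwrite(f"{dirNameR}/{seq}.png", right.getFrame())

        # Drop the oldest unmatched frames so the buffers cannot grow forever
        for pending in (pendingL, pendingR):
            while len(pending) > 8:
                pending.pop(min(pending))

        if cv2.waitKey(1) == ord('q'):
            break

(Recent depthai releases may also ship an on-device Sync node for this, but the sequence-number approach above works with any Gen2 version.)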

case closed ; ))
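
P.S. If someone wants to stay with the depthai_sdk managers from my first snippet: the rectifiedLeft/rectifiedRight previews only become available once a stereo node is part of the managed pipeline. If I remember correctly, PipelineManager has a createDepth(...) helper with useRectifiedLeft/useRectifiedRight flags for exactly that; the sketch below is from memory, so please check the exact signature in your depthai_sdk version:

from depthai_sdk import Previews
from depthai_sdk.managers import PipelineManager, PreviewManager
import depthai as dai
import cv2

pm = PipelineManager()
pm.createColorCam(xout=True, previewSize=(1280, 800))
pm.createLeftCam(xout=True)
pm.createRightCam(xout=True)
# NOTE: argument names are from memory; check PipelineManager.createDepth in your SDK version
pm.createDepth(useRectifiedLeft=True, useRectifiedRight=True)

with dai.Device(pm.pipeline) as device:
    pv = PreviewManager(display=[Previews.color.name, Previews.rectifiedLeft.name, Previews.rectifiedRight.name], mouseTracker=True)
    pv.createQueues(device)

    while True:
        pv.prepareFrames()
        pv.showFrames()

        if cv2.waitKey(1) == ord('q'):
            break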