import depthai as dai
import numpy as np
import cv2
from datetime import timedelta
# Toggle to route depth through an ImageAlign node (aligned to the color output)
# before syncing; set False to sync the raw StereoDepth output instead.
ALIGN_NODE = True
DEFAULT_FPS = 15      # frame rate requested from all three camera outputs
COLOR_WIDTH = 1280    # color stream resolution (CAM_A)
COLOR_HEIGHT = 800
MONO_WIDTH = 1280     # mono stream resolution (CAM_B / CAM_C)
MONO_HEIGHT = 800
# ---- Pipeline definition --------------------------------------------------
pipeline = dai.Pipeline()

# Color camera (CAM_A) producing the RGB stream.
colorNode = pipeline.create(dai.node.Camera).build(boardSocket=dai.CameraBoardSocket.CAM_A)
colorOut = colorNode.requestOutput((COLOR_WIDTH, COLOR_HEIGHT), fps=DEFAULT_FPS)

# Left/right mono cameras (CAM_B / CAM_C) feeding the stereo pair.
monoLeft = pipeline.create(dai.node.Camera).build(boardSocket=dai.CameraBoardSocket.CAM_B)
monoLeftOut = monoLeft.requestOutput((MONO_WIDTH, MONO_HEIGHT), fps=DEFAULT_FPS)
monoRight = pipeline.create(dai.node.Camera).build(boardSocket=dai.CameraBoardSocket.CAM_C)
monoRightOut = monoRight.requestOutput((MONO_WIDTH, MONO_HEIGHT), fps=DEFAULT_FPS)

stereoNode = pipeline.create(dai.node.StereoDepth)
monoLeftOut.link(stereoNode.left)
monoRightOut.link(stereoNode.right)

# Sync groups a color + depth frame pair when their timestamps differ by <= 80 ms.
sync = pipeline.create(dai.node.Sync)
sync.setSyncThreshold(timedelta(milliseconds=80))
colorOut.link(sync.inputs["color"])

# FIX: PEP 8 forbids comparing booleans with `is True`; test the flag directly.
if ALIGN_NODE:
    # Align the depth map to the color camera's viewpoint on-device.
    alignNode = pipeline.create(dai.node.ImageAlign)
    alignNode.setRunOnHost(False)
    stereoNode.depth.link(alignNode.input)
    colorOut.link(alignNode.inputAlignTo)
    alignNode.outputAligned.link(sync.inputs["depth"])
else:
    # No alignment: sync the raw StereoDepth output.
    stereoNode.depth.link(sync.inputs["depth"])

syncQueue = sync.out.createOutputQueue()
# ---- Run loop: show synchronized color + depth previews until 'q' ---------
with pipeline:
    pipeline.start()
    cv2.namedWindow("color", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("color", 640, 400)
    cv2.namedWindow("depth", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("depth", 640, 400)
    while pipeline.isRunning():
        # Non-blocking fetch: returns None when no synced group is ready yet.
        syncGroup = syncQueue.tryGet()
        if syncGroup is None:
            continue

        assert isinstance(syncGroup, dai.MessageGroup)
        if syncGroup.getNumMessages() == 0:
            continue
        print(f"MSG Group timestamp: {syncGroup.getTimestamp()}")
        for name, payload in syncGroup:
            assert isinstance(payload, dai.ImgFrame)
            print(f"\t[{name}] size: [{payload.getWidth()} x {payload.getHeight()}]")
            if name == "color":
                cv2.imshow(name, payload.getCvFrame())
            elif name == "depth":
                depthMap = payload.getFrame()
                # BUG FIX: the stereo "depth" output carries depth in millimeters
                # (uint16), not disparity, so scaling by 255/getMaxDisparity() is
                # meaningless here and the uint8 cast wraps any value > 255,
                # garbling the colormap. Min-max normalize each frame to 0-255
                # instead. (Per-frame normalization makes colors relative to the
                # current scene's depth range.)
                normalizedMap = cv2.normalize(depthMap, None, 0, 255,
                                              cv2.NORM_MINMAX, cv2.CV_8U)
                colorizedDepth = cv2.applyColorMap(normalizedMap, cv2.COLORMAP_JET)
                cv2.imshow(name, colorizedDepth)
        key = cv2.waitKey(1)
        if key == ord('q'):
            pipeline.stop()
            break
Hey @jakaskerl,
Here's an MRE for the problem. You can toggle the ALIGN_NODE flag to compare the effect of the ImageAlign node on the pipeline. Lowering the resolution of the color output eliminates the lag. Please let me know if you need anything else.
I tested this code on OAK-4-PRO-AF and OAK-4-PRO-W devices.
Thank you.