I used the code below for synced color and depth image live streaming using OAK-D PoE. By connecting the camera to my laptop (ZBook workstation), I only achieved about 4.4 FPS? Is there anything wrong with the code that is making it so slow? I intend to use the camera on a moving vehicle for real time sensing. Any recommendations on how to configure the monochrome cameras code-wise to ensure sharp depth images?
import depthai as dai
import numpy as np
import cv2
from datetime import timedelta
import time
pipeline = dai.Pipeline()
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_800_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_800_P)
monoRight.setCamera("right")
color = pipeline.create(dai.node.ColorCamera)
color.initialControl.setAutoExposureLimit(1000)
color.setCamera("color")
stereo = pipeline.create(dai.node.StereoDepth)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
config = stereo.initialConfig.get()
config.postProcessing.speckleFilter.enable = True
config.postProcessing.speckleFilter.speckleRange = 50
config.postProcessing.temporalFilter.enable = False
config.postProcessing.spatialFilter.enable = True
config.postProcessing.spatialFilter.holeFillingRadius = 2
config.postProcessing.spatialFilter.numIterations = 1
config.postProcessing.decimationFilter.decimationFactor = 2
stereo.initialConfig.set(config)
sync = pipeline.create(dai.node.Sync)
sync.setSyncThreshold(timedelta(milliseconds=50))
xoutGrp = pipeline.create(dai.node.XLinkOut)
xoutGrp.setStreamName("xout")
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
stereo.depth.link(sync.inputs["depth"])
color.video.link(sync.inputs["color"])
sync.out.link(xoutGrp.input)
controlIn = pipeline.create(dai.node.XLinkIn)
controlIn.setStreamName("control")
controlIn.out.link(color.inputControl)
with dai.Device(pipeline) as device:
queue = device.getOutputQueue("xout", 1, False)
controlQueue = device.getInputQueue("control")
control = dai.CameraControl()
control.setAutoWhiteBalanceMode(dai.CameraControl.AutoWhiteBalanceMode.OFF)
control.setManualWhiteBalance(5000)
control.setManualFocus(130)
controlQueue.send(control)
while True:
t = time.time()
msgGrp = queue.get()
for name, msg in msgGrp:
frame = msg.getCvFrame()
frame = cv2.resize(frame, (768, 432))
if name == "depth":
frameColored = np.where(frame > 0, cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX), 0).astype(np.uint8)
frameColored = cv2.applyColorMap(frameColored, cv2.COLORMAP_JET)
frameColored[frame == 0] = [0, 0, 0]
cv2.imshow("depth", frameColored)
else:
cv2.imshow(name, frame)
if cv2.waitKey(1) == ord("q"):
break
print(time.time()-t)
cv2.destroyAllWindows()