The title might already suggest that I am hitting hardware limitations, but I would like to double-check. I want a 4K (dai.ColorCameraProperties.SensorResolution.THE_4_K) RGB camera stream while also having a stereo camera stream for depth measurements. While testing this I was already dubious whether it would work at a decent FPS. The goal was ~15 FPS; I am getting ~3 FPS at the moment. But maybe my implementation has some inefficiencies, so I would like to double-check on this forum whether it is a user error. I also couldn't find any performance metrics on this topic, so I can't tell what performance other people or Luxonis are getting. This is my setup: an OAK-D Lite camera with the code below (based on the code from these projects):
import depthai as dai
import cv2

# Constants for the FPS overlay on the preview window
FPS_COLOR = (255, 255, 255)
FPS_POSITION = (10, 30)
class FPSHandler:
    def __init__(self):
        self.start_time = None
        self.frame_count = 0
        self.fps = 0.0

    def update_fps(self):
        # Average FPS over a window of 10 frames
        if self.start_time is None:
            self.start_time = cv2.getTickCount()
        else:
            self.frame_count += 1
            if self.frame_count == 10:
                end_time = cv2.getTickCount()
                self.fps = 10.0 / ((end_time - self.start_time) / cv2.getTickFrequency())
                self.frame_count = 0
                self.start_time = end_time

    def get_fps(self):
        return self.fps
class HostSync:
    def __init__(self):
        self.arrays = {}

    def add_msg(self, name, msg):
        if name not in self.arrays:
            self.arrays[name] = []
        # Add msg to array
        self.arrays[name].append({"msg": msg, "seq": msg.getSequenceNum()})
        synced = {}
        for name, arr in self.arrays.items():
            for obj in arr:
                if msg.getSequenceNum() == obj["seq"]:
                    synced[name] = obj["msg"]
                    break
        if len(synced) == 2:  # Color, Depth
            # Remove old msgs. Rebuild the lists instead of calling
            # arr.remove() while iterating, which skips elements.
            for name in self.arrays:
                self.arrays[name] = [
                    obj for obj in self.arrays[name]
                    if obj["seq"] >= msg.getSequenceNum()
                ]
            return synced
        return False
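# Note: HostSync pairs color and depth messages by their device-side sequence
# numbers and only returns a dict once both streams have delivered a frame
# with the same getSequenceNum(), so the host-side FPS measured below is
# capped by the slower of the two streams.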
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    # ColorCamera
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
    cam.setInterleaved(False)
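    # Back-of-the-envelope estimate (my own numbers, not from the docs):
    # one 4K NV12 video frame is 3840 * 2160 * 1.5 ≈ 12.4 MB, so 15 FPS of
    # color alone is ~190 MB/s over XLink. If the device enumerates at USB2
    # speed (~60 MB/s theoretical), 4K color by itself cannot reach 15 FPS.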
    # Create MonoCamera nodes for left and right cameras
    left = pipeline.create(dai.node.MonoCamera)
    left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    left.setBoardSocket(dai.CameraBoardSocket.LEFT)
    right = pipeline.create(dai.node.MonoCamera)
    right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    # Create StereoDepth node
    stereo = pipeline.create(dai.node.StereoDepth)
    stereo.initialConfig.setConfidenceThreshold(240)
    stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
    stereo.setExtendedDisparity(True)
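    # As far as I understand, both of the settings above cost FPS: extended
    # disparity runs an extra stereo pass, and aligning depth to the RGB
    # sensor makes the device remap every depth frame to the (4K) color
    # geometry. I am not sure how large each effect is on the OAK-D Lite.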
    left.out.link(stereo.left)
    right.out.link(stereo.right)
    # Create XLinkOut for StereoDepth output
    depth_xout = pipeline.create(dai.node.XLinkOut)
    depth_xout.setStreamName("depth")
    stereo.depth.link(depth_xout.input)
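    # If the aligned depth really comes out at the full 4K geometry, each
    # uint16 depth frame is 3840 * 2160 * 2 ≈ 16.6 MB, i.e. ~250 MB/s at
    # 15 FPS on top of the color stream; combined that is more than USB3
    # typically sustains in practice. (Rough estimate on my part.)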
    # Create XLinkOut for ColorCamera output
    cam_xout = pipeline.create(dai.node.XLinkOut)
    cam_xout.setStreamName("color")
    cam.video.link(cam_xout.input)
    print("Pipeline created.")
    return pipeline
def main():
    with dai.Device(create_pipeline()) as device:
        outputs = ['color', 'depth']
        queues = [device.getOutputQueue(name, 4, False) for name in outputs]
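        # maxSize=4 with blocking=False means the oldest messages are
        # silently dropped when the host falls behind, so the printed FPS
        # reflects the rate frames actually arrive at, not the sensor rate.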
        fps_handler = FPSHandler()
        sync = HostSync()
        while True:
            for q in queues:
                if q.has():
                    synced_msgs = sync.add_msg(q.getName(), q.get())
                    if synced_msgs:
                        # getCvFrame() converts the NV12 video frame to BGR
                        # so OpenCV can display it
                        color_frame = synced_msgs["color"].getCvFrame()
                        depth_frame = synced_msgs["depth"].getFrame()
                        # Check depth frame dimensions and type
                        if depth_frame is not None:
                            height, width = depth_frame.shape[:2]
                            center_x = width // 2
                            center_y = height // 2
                            # A value of 0 means the device could not compute
                            # depth for that pixel
                            depth_value = depth_frame[center_y, center_x]
                            print(f"Depth: {depth_value} mm")
                            fps_handler.update_fps()
                            print(f"FPS: {fps_handler.get_fps():.1f}")
                            # Show a downscaled preview; without at least one
                            # HighGUI window, cv2.waitKey() never receives key
                            # events and the 'q' exit below can never trigger
                            preview = cv2.resize(color_frame, (960, 540))
                            cv2.putText(preview, f"FPS: {fps_handler.get_fps():.1f}",
                                        FPS_POSITION, cv2.FONT_HERSHEY_SIMPLEX,
                                        1, FPS_COLOR, 2)
                            cv2.imshow("color", preview)
                        else:
                            print("Depth frame is None")
            key = cv2.waitKey(1)
            if key == ord('q'):
                break
        cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
Thank you in advance for the help; hopefully this turns out to be something I can fix.