patrick52682
Ran the script below for 12+ hours and couldn't reproduce the issue. Can you?
# Standard library
import datetime
import os

# Third-party
import cv2
import depthai as dai
import numpy as np

# Force the TCP/IP transport for the device connection (DEPTHAI_PROTOCOL=tcpip).
os.environ["DEPTHAI_PROTOCOL"] = "tcpip"
class DepthAIHandler:
    """Stream 4K MJPEG-encoded RGB and RGB-aligned stereo depth from a DepthAI
    device, periodically logging host-side frame latency to a file.
    """

    def __init__(self):
        # Connect to the first available device and create an empty pipeline.
        self.device = dai.Device()
        self.pipeline = dai.Pipeline()
        # Camera parameters and configuration.
        self.rgb_fps = 30
        self.camera_focus = 130  # Manual lens position; adjust as needed.
        self.camera_exposure_us = 20000  # Exposure time in microseconds.
        self.camera_iso_sensitivity = 800
        self.depth_camera_resolution = dai.MonoCameraProperties.SensorResolution.THE_720_P
        self.depth_fps = 30
        self.brightness = 0  # Adjust as needed (not applied by setup_pipeline).
        # Stereo-depth post-processing filter settings.
        self.spatial_filter_hole_filling_radius = 2
        self.spatial_filter_num_iterations = 1
        self.speckle_filter_speckle_range = 50

    def logger(self, rgb_frame, depth_frame):
        """Append the latency of one RGB and one depth frame to latency_log.txt.

        Latency is the difference between the host clock (``dai.Clock.now()``)
        and each frame's device timestamp.
        """
        with open("latency_log.txt", "a") as f:
            current_time = dai.Clock.now()
            rgb_latency = current_time - rgb_frame.getTimestamp()
            depth_latency = current_time - depth_frame.getTimestamp()
            f.write(
                f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')},RGB_latency:{rgb_latency},Depth_latency:{depth_latency}\n"
            )

    def setup_pipeline(self):
        """Build the pipeline: 4K color -> MJPEG encoder -> XLinkOut "rgb",
        and a 720p mono pair -> StereoDepth (aligned to RGB) -> XLinkOut "depth".
        """
        # Color camera producing 4K frames, ISP-downscaled 1:2.
        rgb_camera = self.pipeline.create(dai.node.ColorCamera)
        rgb_camera.setInterleaved(False)
        rgb_camera.setBoardSocket(dai.CameraBoardSocket.RGB)
        rgb_camera.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
        rgb_camera.setFps(self.rgb_fps)
        # BUG FIX: the original had a bare `rgb_camera.setNumFramesPool` — an
        # attribute access that never called the method, i.e. a silent no-op.
        # Removed; call setNumFramesPool(raw, isp, preview, video, still) here
        # if a non-default frame pool size is actually wanted.
        rgb_camera.initialControl.setManualFocus(self.camera_focus)
        rgb_camera.initialControl.setManualExposure(
            self.camera_exposure_us, self.camera_iso_sensitivity
        )
        rgb_camera.initialControl.setSharpness(0)
        rgb_camera.initialControl.setLumaDenoise(0)
        rgb_camera.initialControl.setChromaDenoise(0)
        rgb_camera.setIspScale(1, 2)

        # RGB output node for clients.
        rgb_out = self.pipeline.create(dai.node.XLinkOut)
        rgb_out.setStreamName("rgb")

        # MJPEG encoder between the camera's video output and the host link.
        encoder = self.pipeline.create(dai.node.VideoEncoder)
        encoder.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
        rgb_camera.video.link(encoder.input)
        encoder.bitstream.link(rgb_out.input)

        # Left/right mono cameras feeding stereo depth.
        depth_out = self.pipeline.create(dai.node.XLinkOut)
        depth_out.setStreamName("depth")
        left_camera = self.pipeline.create(dai.node.MonoCamera)
        left_camera.setResolution(self.depth_camera_resolution)
        left_camera.setBoardSocket(dai.CameraBoardSocket.LEFT)
        left_camera.setFps(self.depth_fps)
        right_camera = self.pipeline.create(dai.node.MonoCamera)
        right_camera.setResolution(self.depth_camera_resolution)
        right_camera.setBoardSocket(dai.CameraBoardSocket.RIGHT)
        right_camera.setFps(self.depth_fps)

        # Stereo node producing the depth map, aligned to the RGB socket.
        depth_camera = self.pipeline.create(dai.node.StereoDepth)
        depth_camera.setDefaultProfilePreset(
            dai.node.StereoDepth.PresetMode.HIGH_ACCURACY
        )
        depth_camera.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
        depth_camera.setLeftRightCheck(True)
        depth_camera.setExtendedDisparity(False)
        depth_camera.setSubpixel(True)
        depth_camera.setDepthAlign(dai.CameraBoardSocket.RGB)

        # Depth post-processing: spatial, speckle, and decimation filters.
        config = depth_camera.initialConfig.get()
        config.postProcessing.spatialFilter.enable = True
        config.postProcessing.spatialFilter.holeFillingRadius = (
            self.spatial_filter_hole_filling_radius
        )
        config.postProcessing.spatialFilter.numIterations = (
            self.spatial_filter_num_iterations
        )
        config.postProcessing.speckleFilter.enable = True
        config.postProcessing.speckleFilter.speckleRange = (
            self.speckle_filter_speckle_range
        )
        config.postProcessing.decimationFilter.decimationFactor = 4
        depth_camera.initialConfig.set(config)

        left_camera.out.link(depth_camera.left)
        right_camera.out.link(depth_camera.right)
        depth_camera.depth.link(depth_out.input)

    def start_pipeline(self):
        """Upload the built pipeline to the device and start it."""
        # Flood light / dot projector brightness [0-1500] could be set here.
        self.device.startPipeline(self.pipeline)

    def run(self):
        """Build and start the pipeline, then loop fetching frames forever,
        logging latency every 10th RGB frame. Closes the device on exit.
        """
        self.setup_pipeline()
        self.start_pipeline()
        try:
            rgb_queue = self.device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
            depth_queue = self.device.getOutputQueue(name="depth", maxSize=4, blocking=False)
            while True:
                # Blocking .get() — waits for the next frame of each stream.
                rgb_frame = rgb_queue.get()
                depth_frame = depth_queue.get()
                # Log latency every 10th frame (the original comment said
                # "every 30th" but the code has always used % 10).
                if rgb_frame.getSequenceNum() % 10 == 0:
                    self.logger(rgb_frame, depth_frame)
                # NOTE(review): no cv2.imshow window is ever created, so
                # waitKey almost certainly never receives 'q' and the loop
                # only ends via an external interrupt — confirm intended.
                if cv2.waitKey(1) == ord('q'):
                    break
        finally:
            # Cleanup even on KeyboardInterrupt or errors.
            cv2.destroyAllWindows()
            self.device.close()
# Main execution
if __name__ == "__main__":
handler = DepthAIHandler()
handler.run()
Thanks
Jaka