Hi,
We're using a camera with the pipeline below. It runs fine for around 4 hours, then it stops producing depth images and features.
```
# For completeness, roughly the imports this snippet relies on; `parameters`,
# MODEL_FOLDER and the stream-name constants are defined elsewhere in our module.
import json
from depthai import (
    CameraBoardSocket, ColorCameraProperties, Device, ImgFrame,
    MonoCameraProperties, Pipeline, RawStereoDepthConfig,
    StereoDepthProperties, VideoEncoderProperties,
)

pipeline = Pipeline()
color = pipeline.createColorCamera()
color.setBoardSocket(CameraBoardSocket.RGB)
color.setFps(10.0)
color.setResolution(ColorCameraProperties.SensorResolution.THE_4_K)
color.setVideoSize(parameters.color_video_width, parameters.color_video_height)
color.setInterleaved(False)
color.initialControl.setSharpness(parameters.sharpness)
color.initialControl.setLumaDenoise(parameters.luma_denoise)
color.initialControl.setChromaDenoise(parameters.chroma_denoise)
color.initialControl.setAutoFocusLensRange(parameters.focus_range_infinity_position, parameters.focus_range_macro_position)
color.initialControl.setAutoExposureLimit(parameters.exposure_limit_us)
# The detection model can't keep up with 10 fps, so a Script node drops every 2nd frame to get 5 fps (see the sketch of the script body below this block).
color_downsample_script = pipeline.createScript()
color_downsample_script.setScript(create_downsample_script(DOWNSAMPLE_SCRIPT_STREAM_IN, DOWNSAMPLE_SCRIPT_STREAM_OUT, 2))
color_resize = pipeline.createImageManip()
color_resize.initialConfig.setResize(parameters.color_preview_width, parameters.color_preview_height)
color_resize.initialConfig.setFrameType(ImgFrame.Type.BGR888p)
# Sending high-resolution images at 5 fps takes too much bandwidth, so we downsample the high-resolution stream further, to 1 frame every 4 s.
hi_res_downsample_script = pipeline.createScript()
hi_res_downsample_script.setScript(create_downsample_script(DOWNSAMPLE_SCRIPT_STREAM_IN, DOWNSAMPLE_SCRIPT_STREAM_OUT, parameters.high_resolution_image_modulo))
hi_res_encoder = pipeline.createVideoEncoder()
hi_res_encoder.setProfile(VideoEncoderProperties.Profile.MJPEG)
hi_res_encoder.setQuality(parameters.high_resolution_image_quality)
left = pipeline.createMonoCamera()
left.setBoardSocket(CameraBoardSocket.LEFT)
left.setFps(10.0)
left.setResolution(MonoCameraProperties.SensorResolution.THE_400_P)
# left.initialControl.setManualExposure(100, 400)
# left.initialControl.setAutoExposureLock(True)
right = pipeline.createMonoCamera()
right.setBoardSocket(CameraBoardSocket.RIGHT)
right.setFps(10.0)
right.setResolution(MonoCameraProperties.SensorResolution.THE_400_P)
# right.initialControl.setManualExposure(100, 400)
# right.initialControl.setAutoExposureLock(True)
depth = pipeline.createStereoDepth()
# depth.setDepthAlign(CameraBoardSocket.RIGHT)
# depth.setExtendedDisparity(True)
depth.setLeftRightCheck(True) # LR-check required for depth alignment
depth.initialConfig.setConfidenceThreshold(parameters.depth_confidence_threshold)
depth.initialConfig.setMedianFilter(StereoDepthProperties.MedianFilter.MEDIAN_OFF)
depth.initialConfig.setSubpixel(True)
depth.initialConfig.setBilateralFilterSigma(0)
depth.initialConfig.setSubpixelFractionalBits(5)
depth.initialConfig.setDisparityShift(30)
depth.initialConfig.setDepthUnit(RawStereoDepthConfig.AlgorithmControl.DepthUnit.CUSTOM)
depth_config = depth.initialConfig.get()
depth_config.algorithmControl.customDepthUnitMultiplier = parameters.depth_unit_multiplier
depth.initialConfig.set(depth_config)
depth.enableDistortionCorrection(True)
features = pipeline.createFeatureTracker()
features.setHardwareResources(numShaves=2, numMemorySlices=2)
model_name = parameters.detection_model_name
model_config_path = MODEL_FOLDER / (model_name + '.json')
with open(model_config_path) as fp:
    model_config = json.load(fp)
model_blob_path = MODEL_FOLDER / (model_name + '.blob')
labels = model_config['labels']
coordinate_size = model_config['coordinate_size']
anchors = model_config['anchors']
anchor_masks = model_config['anchor_masks']
iou_threshold = model_config['iou_threshold']
detector_confidence_threshold = model_config['confidence_threshold']
detection = pipeline.createYoloDetectionNetwork()
detection.setBlobPath(model_blob_path)
detection.setAnchors(anchors)
detection.setAnchorMasks(anchor_masks)
detection.setConfidenceThreshold(detector_confidence_threshold)
detection.setNumClasses(len(labels))
detection.setCoordinateSize(coordinate_size)
detection.setIouThreshold(iou_threshold)
detection.setNumInferenceThreads(2)
detection.input.setBlocking(False)
detection.input.setQueueSize(1)
control_in = pipeline.createXLinkIn()
color_out = pipeline.createXLinkOut()
hi_res_out = pipeline.createXLinkOut()
depth_out = pipeline.createXLinkOut()
features_out = pipeline.createXLinkOut()
detection_out = pipeline.createXLinkOut()
control_in.setStreamName(CONTROL_STREAM_NAME)
color_out.setStreamName(COLOR_STREAM_NAME)
hi_res_out.setStreamName(HI_RES_STREAM_NAME)
depth_out.setStreamName(DEPTH_RIGHT_STREAM_NAME)
features_out.setStreamName(FEATURES_STREAM_NAME)
detection_out.setStreamName(DETECTIONS_STREAM_NAME)
# We also tried without these two lines, but it didn't make a difference.
depth_out.input.setBlocking(False)
depth_out.input.setQueueSize(1)
control_in.out.link(color.inputControl)
color.video.link(color_downsample_script.inputs[DOWNSAMPLE_SCRIPT_STREAM_IN])
color_downsample_script.outputs[DOWNSAMPLE_SCRIPT_STREAM_OUT].link(color_resize.inputImage)
color_downsample_script.outputs[DOWNSAMPLE_SCRIPT_STREAM_OUT].link(hi_res_downsample_script.inputs[DOWNSAMPLE_SCRIPT_STREAM_IN])
color_resize.out.link(color_out.input)
color_resize.out.link(detection.input)
hi_res_downsample_script.outputs[DOWNSAMPLE_SCRIPT_STREAM_OUT].link(hi_res_encoder.input)
hi_res_encoder.bitstream.link(hi_res_out.input)
left.out.link(depth.left)
right.out.link(depth.right)
depth.depth.link(depth_out.input)
depth.rectifiedRight.link(features.inputImage)
features.outputFeatures.link(features_out.input)
detection.out.link(detection_out.input)
```
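
For context, `create_downsample_script` (not shown above) produces the body of an on-device Script node that forwards every Nth frame and drops the rest. A simplified sketch of what that helper generates, not the exact code:
```
# Hypothetical sketch of the frame-dropping helper; the real create_downsample_script
# may differ, but the idea is to forward only every `modulo`-th frame on-device.
def create_downsample_script(stream_in: str, stream_out: str, modulo: int) -> str:
    return f"""
counter = 0
while True:
    frame = node.io['{stream_in}'].get()   # blocking read from the linked input
    if counter % {modulo} == 0:
        node.io['{stream_out}'].send(frame)
    counter += 1
"""
```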
and on the host side:
```
device = Device(self.__pipeline, self.__device_info)
color_queue = device.getOutputQueue(name=COLOR_STREAM_NAME, maxSize=1, blocking=False)
hi_res_queue = device.getOutputQueue(name=HI_RES_STREAM_NAME, maxSize=1, blocking=False)
depth_queue = device.getOutputQueue(name=DEPTH_RIGHT_STREAM_NAME, maxSize=1, blocking=False)
features_queue = device.getOutputQueue(name=FEATURES_STREAM_NAME, maxSize=1, blocking=False)
detections_queue = device.getOutputQueue(name=DETECTIONS_STREAM_NAME, maxSize=1, blocking=False)
color_queue.addCallback(self.__get_and_publish_color_image)
hi_res_queue.addCallback(self.__get_and_publish_hi_res_image)
depth_queue.addCallback(self.__get_and_publish_depth_image)
detections_queue.addCallback(self.__get_and_publish_detections)
features_queue.addCallback(self.__get_and_publish_features)
```
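The callbacks themselves just pull the message from the queue and publish it onwards. As a simplified illustration (not our exact code, names are hypothetical), the depth callback does something like:
```
# Simplified illustration of the depth callback; the real
# __get_and_publish_depth_image hands the frame to the rest of our system.
def get_and_publish_depth_image(msg):
    depth = msg.getFrame()           # uint16 depth map from the StereoDepth node
    timestamp = msg.getTimestamp()   # timestamp of the frame
    publish_depth(depth, timestamp)  # hypothetical publish helper
```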
We also collected some logs (see the attached file). There are no errors whatsoever. Around line 84215 of the log the camera stops producing depth images and features; from that point on the logs only show a drop in CPU and NOC DDR usage. Do you have any idea what could be wrong? Thanks in advance!