Neurotronics67

  • Jun 16, 2024
  • jakaskerl
    Thanks for your answer;
    unfortunately, it doesn't work either:

    [1944301031B09D2E00] [1.1] [0.691] [ColorCamera(0)] [warning] Unsupported resolution set for detected camera OV9782, needs 800_P or 720_P. Defaulting to 800_P
  • Hello,

    I have an OAK-D Wide camera that I'm using for a robot navigation application.
    My goal is simple: I want to retrieve the RGB stream and an aligned, filtered depth map as fast as possible, at low resolution.

    I start by configuring the disparity processing to get the smoothest result, and I use a decimation filter of 3 to speed things up (the resulting size is 221x140).

    Then I added the RGB camera output and set its resolution to the same size as the disparity map (e.g. camRgb.setPreviewSize(221, 140)).

    Unfortunately, I get this warning:

    [1944301031B09D2E00] [1.1] [0.692] [ColorCamera(0)] [warning] Unsupported resolution set for detected camera OV9782, needs 800_P or 720_P. Defaulting to 800_P

    As a result, the output (processed depth and RGB) is resized to 800_P, which is logical, since the disparity map is resized to the RGB resolution.

    My question: how can I set the RGB resolution to match the disparity size and get the result as fast as possible? Is there a workaround? (One possible direction is sketched after the code listing below.)

    Best regards,

    Neuro

    #!/usr/bin/env python3
    
    import cv2
    import depthai as dai
    import numpy as np
    
    # Closer-in minimum depth, disparity range is doubled (from 95 to 190):
    extended_disparity = False
    # Better accuracy for longer distances, fractional disparity (8 levels here, via the 3 fractional bits set below):
    subpixel = True
    # Better handling for occlusions:
    lr_check = True
    
    # Create pipeline
    pipeline = dai.Pipeline()
    # Get device 
    device = dai.Device()
    
    queueNames = []
    
    # Define sources and outputs
    camRgb = pipeline.create(dai.node.ColorCamera)
    monoLeft = pipeline.create(dai.node.MonoCamera)
    monoRight = pipeline.create(dai.node.MonoCamera)
    depth = pipeline.create(dai.node.StereoDepth)
    
    color_out = pipeline.create(dai.node.XLinkOut)
    disp_out = pipeline.create(dai.node.XLinkOut)
    
    color_out.setStreamName("rgb")
    queueNames.append("rgb")
    disp_out.setStreamName("disp")
    queueNames.append("disp")
    
    # init RGB camera
    camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    camRgb.setPreviewSize(221, 140)
    camRgb.setFps(60)
    
    # read the RGB lens position from calibration and fix the focus there,
    # so the RGB-depth alignment stays consistent
    calibData = device.readCalibration2()
    lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A)
    if lensPosition:
        camRgb.initialControl.setManualFocus(lensPosition)
    
    # Properties
    monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoLeft.setCamera("left")
    monoLeft.setFps(60)
    monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoRight.setCamera("right")
    monoRight.setFps(60)
    
    # Configure the depth node (using the disparity output, as it's easier to visualize)
    depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    depth.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF)
    depth.setLeftRightCheck(lr_check)
    depth.setExtendedDisparity(extended_disparity)
    depth.setSubpixel(subpixel)
    depth.setSubpixelFractionalBits(3)
    # align color + depth data
    depth.setDepthAlign(dai.CameraBoardSocket.CAM_A)
    
    # Init post processing spatial filter
    config = depth.initialConfig.get()
    config.postProcessing.spatialFilter.enable = True
    config.postProcessing.spatialFilter.holeFillingRadius = 8
    config.postProcessing.spatialFilter.numIterations = 3
    config.postProcessing.thresholdFilter.minRange = 400
    config.postProcessing.thresholdFilter.maxRange = 4000
    config.postProcessing.decimationFilter.decimationFactor = 3
    depth.initialConfig.set(config)
    
    # Linking
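    # Note: the `video` output linked below does not follow setPreviewSize();
    # only the `preview` output would come out at 221x140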
    camRgb.video.link(color_out.input)
    monoLeft.out.link(depth.left)
    monoRight.out.link(depth.right)
    depth.disparity.link(disp_out.input)
    
    
    # Connect to device and start pipeline
    with device:
        # start pipeline 
        device.startPipeline(pipeline)
        # init frame 
        frame_RGB = None
        frame_disp = None
        # init cv2 windows
        cv2.namedWindow("RGB")
        cv2.namedWindow("Depth")
    
        while True:
    
            latestPacket = {}
            latestPacket["rgb"] = None
            latestPacket["disp"] = None
    
            queueEvents = device.getQueueEvents(("rgb", "disp"))  # wait for whichever stream has new data
            for queueName in queueEvents:
                packets = device.getOutputQueue(queueName).tryGetAll()
                if len(packets) > 0:
                    latestPacket[queueName] = packets[-1]
            
            if latestPacket["rgb"] is not None:
                frame_RGB = latestPacket["rgb"].getCvFrame()
                # plot result 
                cv2.imshow("RGB", frame_RGB)
    
            if latestPacket["disp"] is not None:
                # normalize disparity to the 0..255 range, then colorize for display
                frame_disp = latestPacket["disp"].getFrame()
                frame_disp = (frame_disp * (255 / depth.initialConfig.getMaxDisparity())).astype(np.uint8)
                frame_disp = cv2.applyColorMap(frame_disp, cv2.COLORMAP_JET)
                cv2.imshow("Depth", frame_disp)
    
    
            if cv2.waitKey(1) == ord('q'):
                break
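
    One direction I've been considering (an untested sketch, assuming the standard depthai v2 ColorCamera API): explicitly pin the sensor to one of the two resolutions the warning allows, then take the small RGB frame from the preview output instead of video, replacing the corresponding lines in the listing above:

    # Untested sketch: pin the OV9782 to a resolution it supports and take
    # the small frame from `preview`, which honours setPreviewSize()
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P)
    camRgb.setPreviewSize(221, 140)       # preview is scaled down from the ISP frame
    camRgb.preview.link(color_out.input)  # `preview`, not `video`, comes out at 221x140

    Alternatively, the 800_P video frames could simply be downscaled on the host with cv2.resize(frame_RGB, (221, 140)), at the cost of shipping full-size frames over the link. And if the better lever turns out to be on the depth side, StereoDepth also exposes setOutputSize(width, height), if I read the API correctly.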