AdamPolak
Thank you Adam.
The Python code is as follows:
import numpy as np
import cv2
import depthai as dai
resolution = (1280, 800) # 24 FPS (without visualization)
lrcheck = False # Better handling for occlusions
extended = False # Closer-in minimum depth, disparity range is doubled
subpixel = True # Better accuracy for longer distance, fractional disparity 32-levels
p = dai.Pipeline()
# Configure Mono Camera Properties
left = p.createMonoCamera()
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_800_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right = p.createMonoCamera()
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_800_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
stereo = p.createStereoDepth()
left.out.link(stereo.left)
right.out.link(stereo.right)
# Set stereo depth options
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
config = stereo.initialConfig.get()
config.postProcessing.speckleFilter.enable = False
# config.postProcessing.speckleFilter.speckleRange = 60
config.postProcessing.temporalFilter.enable = False
config.postProcessing.spatialFilter.enable = False
# config.postProcessing.spatialFilter.holeFillingRadius = 2
# config.postProcessing.spatialFilter.numIterations = 1
config.postProcessing.thresholdFilter.minRange = 1000 # mm
config.postProcessing.thresholdFilter.maxRange = 10000 # mm
config.censusTransform.enableMeanMode = True
# These two parameters should be fine-tuned
config.costMatching.linearEquationParameters.alpha = 0
config.costMatching.linearEquationParameters.beta = 2
stereo.initialConfig.set(config)
stereo.setLeftRightCheck(lrcheck)
stereo.setExtendedDisparity(extended)
stereo.setSubpixel(subpixel)
# stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout
# Depth -> Depth Diff
nn = p.createNeuralNetwork()
nn.setBlobPath("diff_images_simplified_openvino_2022.1_4shave.blob")
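# Assumption: this blob takes two disparity frames (input1/input2) and outputs their per-pixel difference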
script = p.create(dai.node.Script)
stereo.disparity.link(script.inputs['in'])
timestamp = dai.Clock.now()
print("ts1 = ", timestamp)
script.setScript("""
old = node.io['in'].get()
while True:
    frame = node.io['in'].get()
    node.io['img1'].send(old)
    node.io['img2'].send(frame)
    old = frame
""")
script.outputs['img1'].link(nn.inputs['input2'])
script.outputs['img2'].link(nn.inputs['input1'])
# stereo.disparity.link(nn.inputs["input1"])
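# Stream the NN output (frame-to-frame disparity difference) to the host over XLink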
depthDiffOut = p.createXLinkOut()
depthDiffOut.setStreamName("depth_diff")
nn.out.link(depthDiffOut.input)
with dai.Device(p) as device:
    qDepthDiff = device.getOutputQueue(name="depth_diff", maxSize=4, blocking=False)
    while True:
        depthDiff = qDepthDiff.get()
        print("ts0 = ", timestamp)
        time_diff = depthDiff.getTimestamp() - timestamp
        print('time_diff = ', time_diff)
        timestamp = depthDiff.getTimestamp()
        print("ts2 = ", timestamp)
        # Reshape the flat FP16 output into a 2D frame; reshape expects (height, width), resolution is (width, height)
        floatVector = depthDiff.getFirstLayerFp16()
        diff = np.array(floatVector).reshape(resolution[1], resolution[0])
        colorize = cv2.normalize(diff, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
        colorize = cv2.applyColorMap(colorize, cv2.COLORMAP_JET)
        cv2.imshow("Diff", colorize)
        if cv2.waitKey(1) == ord('q'):
            break