@SadiaC Hi — I was able to do this by returning both the pipeline and the stereo node. I just wanted to check whether this looks okay, or whether there is a better approach for saving the disparity maps from the two cameras. Thank you!
#!/usr/bin/env python3
import cv2
import depthai as dai
import contextlib
from calc import HostSpatialsCalc
from utility import *
import numpy as np
import math
def createPipeline():
    """Build a DepthAI pipeline with an RGB preview stream and stereo
    depth/disparity output streams.

    Returns:
        tuple: ``(pipeline, stereo)`` — the StereoDepth node is returned so
        the host side can query ``initialConfig.getMaxDisparity()`` when
        normalizing the disparity frame for display.
    """
    pipeline = dai.Pipeline()

    # Color camera -> "rgb" XLink stream (300x300 preview)
    camRgb = pipeline.create(dai.node.ColorCamera)
    camRgb.setPreviewSize(300, 300)
    camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    camRgb.setInterleaved(False)

    xoutRgb = pipeline.create(dai.node.XLinkOut)
    xoutRgb.setStreamName("rgb")
    camRgb.preview.link(xoutRgb.input)

    # Mono cameras feeding the StereoDepth node
    monoLeft = pipeline.create(dai.node.MonoCamera)
    monoRight = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)

    monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    # CAM_B/CAM_C are the modern names; LEFT/RIGHT are deprecated aliases
    # (the RGB camera above already uses the modern CAM_A name).
    monoLeft.setBoardSocket(dai.CameraBoardSocket.CAM_B)
    monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoRight.setBoardSocket(dai.CameraBoardSocket.CAM_C)

    stereo.initialConfig.setConfidenceThreshold(255)
    stereo.setLeftRightCheck(True)
    stereo.setSubpixel(False)

    monoLeft.out.link(stereo.left)
    monoRight.out.link(stereo.right)

    # Depth stream ("depth")
    xoutDepth = pipeline.create(dai.node.XLinkOut)
    xoutDepth.setStreamName("depth")
    stereo.depth.link(xoutDepth.input)

    # Disparity stream ("disp") — a separate XLinkOut node; the original
    # reused the variable name `xoutDepth` for it, which worked but was
    # confusing to read.
    xoutDisp = pipeline.create(dai.node.XLinkOut)
    xoutDisp.setStreamName("disp")
    stereo.disparity.link(xoutDisp.input)

    return pipeline, stereo
with contextlib.ExitStack() as stack:
    deviceInfos = dai.Device.getAllAvailableDevices()
    usbSpeed = dai.UsbSpeed.SUPER
    openVinoVersion = dai.OpenVINO.Version.VERSION_2021_4

    qRgbMap = []
    devices = []

    for deviceInfo in deviceInfos:
        deviceInfo: dai.DeviceInfo
        # ExitStack closes every opened device on exit, even on error.
        device: dai.Device = stack.enter_context(
            dai.Device(openVinoVersion, deviceInfo, usbSpeed)
        )
        devices.append(device)

        mxId = device.getMxId()
        cameras = device.getConnectedCameras()
        usbSpeed = device.getUsbSpeed()
        eepromData = device.readCalibration2().getEepromData()

        pipeline, stereo = createPipeline()
        device.startPipeline(pipeline)

        # Output queues for the streams defined in createPipeline()
        depthQueue = device.getOutputQueue(name="depth")
        dispQ = device.getOutputQueue(name="disp")
        q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)

        text = TextHelper()
        hostSpatials = HostSpatialsCalc(device)
        y = 200
        x = 300
        step = 3
        delta = 5
        hostSpatials.setDeltaRoi(delta)

        stream_name = "rgb-" + mxId + "-" + eepromData.productName
        stream_name_depth = "depth-" + mxId + "-" + eepromData.productName

        # BUG FIX: capture this device's max disparity and spatials
        # calculator NOW. The original display loop referenced `stereo` and
        # `hostSpatials` after the for loop, i.e. always those of whichever
        # device was opened last.
        maxDisp = stereo.initialConfig.getMaxDisparity()
        qRgbMap.append(
            (q_rgb, stream_name, stream_name_depth, depthQueue, dispQ,
             maxDisp, hostSpatials)
        )

    running = True
    while running:
        for (q_rgb, stream_name, stream_name_depth, depthQueue, dispQ,
             maxDisp, hostSpatials) in qRgbMap:
            depthData = depthQueue.get()
            # centroid == x/y in our case
            spatials, centroid = hostSpatials.calc_spatials(depthData, (x, y))

            # Get disparity frame for nicer depth visualization.
            # BUG FIX: original line read `disp \* (...)` — a markdown-escaped
            # asterisk that is a SyntaxError in Python.
            disp = dispQ.get().getFrame()
            disp = (disp * (255 / maxDisp)).astype(np.uint8)
            disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
            cv2.imshow(stream_name_depth, disp)

            if q_rgb.has():
                cv2.imshow(stream_name, q_rgb.get().getCvFrame())

        # BUG FIX: the original `break` only exited the inner for loop, so
        # pressing 'q' never terminated the `while True`. Poll once per pass
        # over all devices and clear the flag to exit cleanly.
        if cv2.waitKey(1) == ord('q'):
            running = False