import cv2
import depthai
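# The pipeline describes the nodes that will run on the device and how they are linked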
pipeline = depthai.Pipeline()
# Setting the XLink chunk size to 0 disables chunking, which can improve throughput
pipeline.setXLinkChunkSize(0)
control = pipeline.createXLinkIn()
control.setStreamName('control')
xinTofConfig = pipeline.createXLinkIn()
xinTofConfig.setStreamName('tofConfig')
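# These host-to-device input streams ('control' and 'tofConfig') are created but never
# linked or used below; they are left as placeholders for sending camera control / ToF
# configuration messages from the host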
xout_rgb = pipeline.createXLinkOut()
xout_rgb2 = pipeline.createXLinkOut()
cam_rgb = pipeline.createColorCamera()
cam_rgb2 = pipeline.createColorCamera()
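# Configure both color cameras: 1080p preview output, planar (non-interleaved) frames,
# and a dedicated physical camera socket for each (CAM_A and CAM_C)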
cam_rgb.setPreviewSize(1920, 1080)
cam_rgb2.setPreviewSize(1920, 1080)
cam_rgb.setInterleaved(False)
cam_rgb2.setInterleaved(False)
cam_rgb.setBoardSocket(depthai.CameraBoardSocket.CAM_A)
cam_rgb2.setBoardSocket(depthai.CameraBoardSocket.CAM_C)
# Name the XLink output streams so the host can open matching output queues
xout_rgb.setStreamName("cama")
xout_rgb2.setStreamName("camc")
# Linking camera preview to XLink input, so that the frames will be sent to host
cam_rgb.preview.link(xout_rgb.input)
cam_rgb2.preview.link(xout_rgb2.input)
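# getDeviceByMxId() returns a (found, deviceInfo) tuple; with an empty MxId nothing
# matches, so only the pipeline is passed below and the first available device is used.
# Put a specific device's MxId in the string to pin the script to that device.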
found, device_info = depthai.Device.getDeviceByMxId("")
dai_device_args = [pipeline]
if found:
    dai_device_args.append(device_info)
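# Creating the Device uploads the pipeline to it and starts it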
with depthai.Device(*dai_device_args) as device:
    # Host-side queues that receive frames sent over the "cama" and "camc" streams
    q_rgb = device.getOutputQueue("cama")
    q_rgb2 = device.getOutputQueue("camc")
    frame = None
    frame2 = None

    while True:
        # tryGet() is non-blocking and returns None when no new frame has arrived yet
        in_rgb = q_rgb.tryGet()
        in_rgb2 = q_rgb2.tryGet()
        if in_rgb is not None:
            frame = in_rgb.getCvFrame()
            cv2.imshow("cama", frame)
        if in_rgb2 is not None:
            frame2 = in_rgb2.getCvFrame()
            cv2.imshow("camc", frame2)
        # Each camera is shown in its own window; press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break
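# Optional variation (a sketch, not part of the flow above): the output queues could be
# opened as non-blocking with a small buffer, so the preview always shows the newest
# frame instead of letting frames back up, e.g.:
#
#     q_rgb = device.getOutputQueue("cama", maxSize=4, blocking=False)
#     q_rgb2 = device.getOutputQueue("camc", maxSize=4, blocking=False)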