I would like to capture single RGB + depth images from Python code for an OAK-D LR camera. I use the script below, but I get an error:
line 16, in <module>
device = dai.Device(deviceInfo=device_info)
RuntimeError: Failed to connect to device, error message: X_LINK_DEVICE_NOT_FOUND
Using the OAK viewer I have no issues getting an RGB stream + IP of the device (it's a local link ip)
What I've done so far:
- OAK viewer: I can see the RGB stream+IP no problem.
- Installed the depthai library from GitHub and tried some of the other example scripts — same error.
- Tried SSH from the terminal, but the connection is refused; it seems the OS on the camera is an older version.
- Tried a USB-C cable and the 'adb' command for Android devices mentioned in the docs. No device is listed by adb, although lsusb shows the camera as connected.
- Tried turning off Wi-Fi (this was needed to connect through the viewer).
Could it be that the device simply isn't found on the network from my code? I did supply the specific IP shown in the OAK viewer. How should I proceed here?
#!/usr/bin/env python3
"""Capture synchronized RGB-still + depth frames from an OAK-D LR over Ethernet.

Triggers a still capture on the color camera in a loop, pairs each still with
the closest-in-time depth frame via a Sync node, and shows both with OpenCV.

NOTE(review): the X_LINK_DEVICE_NOT_FOUND error is raised by dai.Device(...)
before any pipeline logic runs — it means the host cannot reach the camera
over XLink/TCP, not that this script is wrong. With a link-local address
(169.254.x.x) the host NIC must have an address in the same 169.254.0.0/16
subnet and no firewall may block device discovery — verify against the
Luxonis connectivity docs for your depthai version.
"""
import datetime
import time

import cv2  # type: ignore
import depthai as dai
import numpy as np

# We'll want this to be high enough to make our sync threshold (5 ms)
# reasonable.
FPS = 15

# Create the pipeline and connect to a specific device by IP address.
pipeline = dai.Pipeline()
device_info = dai.DeviceInfo("169.254.1.222")  # ip-address (link-local)
device = dai.Device(deviceInfo=device_info)

# Define sources and outputs.
camRgb = pipeline.create(dai.node.ColorCamera)

# Host -> device control stream, used below to trigger still captures.
control = pipeline.create(dai.node.XLinkIn)
control.setStreamName("control")
control.out.link(camRgb.inputControl)

# Color camera properties.
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
camRgb.setFps(FPS)

# Mono cameras feeding the stereo depth node.
left = pipeline.create(dai.node.MonoCamera)
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
left.setCamera("left")
left.setFps(FPS)
left.setNumFramesPool(2)

right = pipeline.create(dai.node.MonoCamera)
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
right.setCamera("right")
right.setFps(FPS)
right.setNumFramesPool(2)

stereo = pipeline.createStereoDepth()
# stereo.setNumFramesPool(1)
left.out.link(stereo.left)
right.out.link(stereo.right)

# Sync node pairs an RGB still with a depth frame within 5 ms of each other.
sync = pipeline.create(dai.node.Sync)  # type: ignore
sync.setSyncThreshold(datetime.timedelta(milliseconds=5))  # type: ignore
# We want to sync every frame.
sync.setSyncAttempts(-1)  # type: ignore

camRgb.still.link(sync.inputs["rgb"])  # type: ignore
stereo.depth.link(sync.inputs["depth"])  # type: ignore

# Keep only the newest frame on each input so stale frames are dropped.
sync.inputs["rgb"].setBlocking(False)  # type: ignore
sync.inputs["rgb"].setQueueSize(1)  # type: ignore
sync.inputs["depth"].setBlocking(False)  # type: ignore
sync.inputs["depth"].setQueueSize(1)  # type: ignore
# This doesn't work as expected... maybe a bug?
# for input in sync.getInputs():
#     input.setBlocking(False)
#     input.setQueueSize(1)

# Device -> host output carrying the synced (rgb, depth) message group.
syncOut = pipeline.create(dai.node.XLinkOut)
syncOut.setStreamName("sync")
sync.out.link(syncOut.input)  # type: ignore

cv2.namedWindow("Luxonis", cv2.WINDOW_NORMAL)
cv2.namedWindow("Depth", cv2.WINDOW_NORMAL)

with device:
    device.startPipeline(pipeline)
    camera_control = device.getInputQueue(name="control")
    # rgbWindowName = "rgb"
    # cv2.namedWindow(rgbWindowName, cv2.WINDOW_NORMAL)
    sync_queue = device.getOutputQueue("sync", maxSize=4, blocking=False)

    ctrl = dai.CameraControl()
    ctrl.setCaptureStill(True)

    while True:
        print("Triggering Capture")
        camera_control.send(ctrl)
        frame = sync_queue.get()
        image_oakd: np.ndarray = frame["rgb"].getCvFrame()  # type: ignore
        image_depth: np.ndarray = frame["depth"].getCvFrame()  # type: ignore
        cv2.imshow("Luxonis", image_oakd)
        cv2.imshow("Depth", image_depth)
        # Wait up to 1 s for a keypress; 'q' quits the capture loop.
        if cv2.waitKey(1000) == ord("q"):
            break