Hi there,
I have been trying to run a custom network on my device, without success. I have adapted several code examples, and all of them fail with the same error:
RuntimeError: Failed to connect to device, error message: X_LINK_DEVICE_ALREADY_IN_USE
I can run the device by simply calling depthai_launcher - that works fine.
Any ideas would be highly appreciated!
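In case it helps narrow this down, my understanding is that the connected devices and their states can be listed before opening the pipeline, roughly like this (a sketch based on depthai's device-discovery API; I haven't verified it on my setup):

import depthai as dai

# List every device XLink currently reports, with its state.
# A device already claimed by another process (e.g. a leftover
# depthai_launcher) may show up as booted, or not show up at all.
for info in dai.Device.getAllAvailableDevices():
    print(f"Device {info.getMxId()} - state: {info.state}")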
Below is one of the code examples I'm trying to run:
import cv2
import depthai as dai
import numpy as np
def visualize_detections_xyxy(frame: np.ndarray, message: dai.ImgDetections):
    """Visualize the detections on the frame.

    The detections are in xyxy format (dai.ImgDetections).
    Returns True once the user presses 'q' to quit.
    """
    labels = ["drone"]
    detections = message.detections
    for detection in detections:
        xmin, ymin, xmax, ymax = (
            detection.xmin,
            detection.ymin,
            detection.xmax,
            detection.ymax,
        )
        # Values above 1 are treated as absolute pixel coordinates;
        # otherwise they are normalized and scaled to the frame size.
        if xmin > 1 or ymin > 1 or xmax > 1 or ymax > 1:
            xmin = int(xmin)
            ymin = int(ymin)
            xmax = int(xmax)
            ymax = int(ymax)
        else:
            xmin = int(xmin * frame.shape[1])
            ymin = int(ymin * frame.shape[0])
            xmax = int(xmax * frame.shape[1])
            ymax = int(ymax * frame.shape[0])
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
        cv2.putText(
            frame,
            f"{detection.confidence * 100:.2f}%",
            (xmin + 10, ymin + 20),
            cv2.FONT_HERSHEY_TRIPLEX,
            0.5,
            (255, 0, 0),
        )
        if labels is not None:
            cv2.putText(
                frame,
                labels[detection.label],
                (xmin + 10, ymin + 40),
                cv2.FONT_HERSHEY_TRIPLEX,
                0.5,
                (255, 0, 0),
            )
    cv2.imshow("Detections", frame)
    if cv2.waitKey(1) == ord("q"):
        cv2.destroyAllWindows()
        return True
    return False
# Create pipeline
pipeline = dai.Pipeline()
MODEL_PATH = "drone_v1_model.blob" # Use the .blob file instead of .onnx.tar.xz
cam = pipeline.create(dai.node.ColorCamera)
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_720_P)
cam.setFps(30)
# NOTE: the preview size should be set to the model's expected input,
# e.g. cam.setPreviewSize(640, 640); otherwise the default preview size is used.
# Create the neural network node
nn = pipeline.create(dai.node.NeuralNetwork)
nn.setBlobPath(MODEL_PATH)  # Set the path to the .blob file
# NOTE: a plain NeuralNetwork node outputs raw NNData. Parsed detections
# (message.detections) normally come from a detection node such as
# dai.node.YoloDetectionNetwork or dai.node.MobileNetDetectionNetwork.
# Link the camera output to the neural network input
cam.preview.link(nn.input)
# Create XLinkOut nodes to stream camera frames and NN results to the host
xout_frame = pipeline.create(dai.node.XLinkOut)
xout_frame.setStreamName("frame_output")
cam.preview.link(xout_frame.input)  # Host copy of the camera's preview stream

xout_nn = pipeline.create(dai.node.XLinkOut)
xout_nn.setStreamName("nn_output")
nn.out.link(xout_nn.input)  # Host copy of the neural network's output
# Start pipeline
with dai.Device(pipeline) as device:
    frame_queue = device.getOutputQueue(name="frame_output", maxSize=4, blocking=False)
    nn_queue = device.getOutputQueue(name="nn_output", maxSize=4, blocking=False)

    while True:
        # Get the frame from the frame queue
        frame = frame_queue.get().getCvFrame()
        # Get the message containing the bounding boxes from the NN queue
        nn_message = nn_queue.get()
        # Visualize detections; the helper returns True when 'q' is pressed
        if visualize_detections_xyxy(frame, nn_message):
            break
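One more thing I was wondering: would explicitly selecting an available device before connecting change anything? Something like this (just an untested sketch; I'm assuming the dai.Device(pipeline, device_info) overload from the docs):

# Pick the first device XLink reports as available, then connect to it,
# reusing the `pipeline` built above.
found, device_info = dai.Device.getFirstAvailableDevice()
if not found:
    raise RuntimeError("No available device - something may already hold it")
with dai.Device(pipeline, device_info) as device:
    pass  # same queue/loop logic as above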