Hello, I am currently working on feature-detection-based code for aligning a square-shaped object with the camera. I am using the OAK-D Pro cameras. The code is included at the bottom of this post. When uploading this code to an OAK-D Pro camera with autofocus, the code seems to run perfectly fine. With an OAK-D Pro with fixed focus, however, I get the following error:
RuntimeError Traceback (most recent call last) Cell In[41], [line 207 ](vscode-notebook-cell:?execution_count=41&line=207)[204](vscode-notebook-cell:?execution_count=41&line=204) colorFrame = passthroughFrameColor [206](vscode-notebook-cell:?execution_count=41&line=206) trackedFeaturesColor = outputFeaturesColorQueue.get().trackedFeatures --> [207](vscode-notebook-cell:?execution_count=41&line=207) depthFrame = depthQueue.get().getFrame() [209](vscode-notebook-cell:?execution_count=41&line=209) center = (colorFrame.shape[1] // 2, colorFrame.shape[0] // 2) [211](vscode-notebook-cell:?execution_count=41&line=211) colorFeatureDrawer.drawFeatures(colorFrame, trackedFeaturesColor, center, depthFrame) RuntimeError: Communication exception - possible device error/misconfiguration. Original message 'Couldn't read data from stream: 'depth' (X_LINK_ERROR)'
Posts with similar issues seemed to have problems with their USB cable, which does not appear to be the case for me, as I am using the cable included in the box with the camera. What camera setting could be causing this fixed-focus-specific problem?
Thanks in advance for your response!
The code in question:
import cv2
import depthai as dai
import numpy as np
class FeatureTrackerDrawer:
    """Render helper for the alignment UI.

    Draws the target ("tumor") square, four small corner hit-boxes, any
    tracked features that land inside those hit-boxes (with a depth
    read-out), an alignment status light, and keyboard/mouse overlays onto
    frames shown in a named OpenCV window.
    """

    def __init__(self, windowName):
        # windowName: name of the cv2 window this drawer renders into.
        self.windowName = windowName
        cv2.namedWindow(windowName)
        # Feature-point / guide-line drawing parameters (BGR colors).
        self.circleRadius = 2
        self.pointColor = (0, 0, 255)
        self.lineColor = (0, 255, 0)
        self.crossColor = (0, 255, 0)
        self.depthColor = (255, 255, 0)
        # Alignment status indicator drawn in the bottom-right corner.
        self.statusCircleRadius = 20
        self.statusCircleColorGreen = (0, 255, 0)
        self.statusCircleColorRed = (0, 0, 255)
        self.statusTextColor = (255, 255, 255)
        self.arrowColor = (173, 216, 230)
        # Keyboard-entry state: coordinate mode and square-size mode.
        self.input_mode = False
        self.input_mode_square_size = False
        # Target position, interpreted as a fraction (value/100) of the
        # square size when mapping to pixels in drawDynamicSquare.
        self.x_coordinate_tumor = 50
        self.y_coordinate_tumor = 50
        self.square_size = 200
        self.input_text = ""
        self.input_text_square_size = ""
        # Last observed mouse position, updated by mouse_callback.
        self.mouse_x = 0
        self.mouse_y = 0
        cv2.setMouseCallback(self.windowName, self.mouse_callback, self)

    def drawFeatures(self, img, features, center, depthFrame):
        """Draw the full alignment overlay onto ``img`` (modified in place).

        Parameters:
            img: BGR frame to draw on.
            features: tracked features exposing ``.position.x``/``.position.y``.
            center: (x, y) point guide lines are drawn towards.
            depthFrame: 2D array sampled at feature positions for the depth
                read-out (assumed aligned with ``img`` — confirm upstream).
        """
        x_center_tumor, y_center_tumor = self.drawDynamicSquare(img)
        top_left_square = (x_center_tumor - self.square_size // 2, y_center_tumor - self.square_size // 2)
        small_square_size = 20
        # Four corner hit-boxes nested just inside the target square.
        corners = [
            (top_left_square[0], top_left_square[1]),
            (top_left_square[0] + self.square_size - small_square_size, top_left_square[1]),
            (top_left_square[0], top_left_square[1] + self.square_size - small_square_size),
            (top_left_square[0] + self.square_size - small_square_size, top_left_square[1] + self.square_size - small_square_size)
        ]
        for corner in corners:
            small_square_top_left = corner
            small_square_bottom_right = (small_square_top_left[0] + small_square_size, small_square_top_left[1] + small_square_size)
            cv2.rectangle(img, small_square_top_left, small_square_bottom_right, (255, 0, 0), 2)
        # A corner counts as detected once any feature falls inside it.
        detected_corners = set()
        for feature in features:
            position = (int(feature.position.x), int(feature.position.y))
            for corner_index, corner in enumerate(corners):
                if corner_index in detected_corners:
                    continue
                small_square_top_left = corner
                small_square_bottom_right = (small_square_top_left[0] + small_square_size, small_square_top_left[1] + small_square_size)
                if (small_square_top_left[0] <= position[0] <= small_square_bottom_right[0] and
                    small_square_top_left[1] <= position[1] <= small_square_bottom_right[1]):
                    cv2.circle(img, position, self.circleRadius, self.pointColor, -1, cv2.LINE_AA, 0)
                    cv2.line(img, position, center, self.lineColor, 1)
                    # Only sample depth when the feature lies inside the
                    # depth frame (its size may differ from the color frame).
                    if (0 <= position[1] < depthFrame.shape[0]) and (0 <= position[0] < depthFrame.shape[1]):
                        depth = depthFrame[position[1], position[0]]
                        cv2.putText(img, f"Depth: {depth:.2f}", (position[0], position[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.depthColor, 1, cv2.LINE_AA)
                    detected_corners.add(corner_index)
                    # Each feature satisfies at most one corner.
                    break
        # Aligned when all four corner hit-boxes contain a feature.
        is_aligned = len(detected_corners) == 4
        status_circle_center = (img.shape[1] - 30, img.shape[0] - 30)
        status_circle_color = self.statusCircleColorGreen if is_aligned else self.statusCircleColorRed
        cv2.circle(img, status_circle_center, self.statusCircleRadius, status_circle_color, -1)
        # Text overlays: show current values, or the in-progress buffers
        # while an input mode is active.
        if not self.input_mode:
            cv2.putText(img, f"Tumor Coordinates: ({self.x_coordinate_tumor}, {self.y_coordinate_tumor})", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        else:
            cv2.putText(img, f"Input Mode: {self.input_text}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
        if not self.input_mode_square_size:
            cv2.putText(img, f"Square Size: {self.square_size}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        else:
            cv2.putText(img, f"Define Square Size: {self.input_text_square_size}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
        cv2.putText(img, f"Mouse Coordinates: ({self.mouse_x}, {self.mouse_y})", (10, img.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        commands_text = "Commands: 'p' - Enter input mode | 'o' - Set coordinates | 'q' - Quit | 's' - Switch mode"
        cv2.putText(img, commands_text, (10, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)

    def drawCenterCross(self, img, center):
        """Draw a cross marker at ``center`` on ``img``."""
        cv2.drawMarker(img, center, self.crossColor, markerType=cv2.MARKER_CROSS, markerSize=20, thickness=2)

    def handleInput(self, key):
        """Process one ``cv2.waitKey`` key code while an input mode is active.

        'o' confirms the coordinate buffer (format "x;y"), 'm' confirms the
        square-size buffer, backspace edits the active buffer, Enter leaves
        both modes without applying, and printable ASCII is appended to the
        active buffer.
        """
        if key == ord('o') and self.input_mode:
            try:
                x, y = map(int, self.input_text.split(';'))
                self.x_coordinate_tumor = x
                self.y_coordinate_tumor = y
                print(f"Coordinates set to: x = {x}, y = {y}")
            except ValueError:
                print("Invalid input format. Please enter two numbers separated by a semicolon (e.g., 100;200).")
            # Mode is exited whether or not parsing succeeded.
            self.input_mode = False
            self.input_text = ""
        elif key == ord('m') and self.input_mode_square_size:
            try:
                self.square_size = int(self.input_text_square_size)
                print(f"Size set to: {self.square_size}")
            except ValueError:
                print("Invalid input format. Please enter a valid number.")
            self.input_mode_square_size = False
            self.input_text_square_size = ""
        elif key == 8:  # Backspace: delete last character of active buffer
            if self.input_mode_square_size:
                self.input_text_square_size = self.input_text_square_size[:-1]
            else:
                self.input_text = self.input_text[:-1]
        elif key == 13:  # Enter: cancel both input modes
            self.input_mode = False
            self.input_mode_square_size = False
        elif 32 <= key <= 126:  # Printable ASCII: append to active buffer
            if self.input_mode_square_size:
                self.input_text_square_size += chr(key)
            else:
                self.input_text += chr(key)

    def drawDynamicSquare(self, img):
        """Draw the target square and return its center (x, y) in pixels.

        The stored coordinates are mapped so that 50;50 places the square at
        the image center; the offset is inverted (negative sign) and scaled
        by ``square_size``.
        """
        x_center_tumor = int(-(self.x_coordinate_tumor / 100.0) * self.square_size + self.square_size/2 + img.shape[1]/2)
        y_center_tumor = int(-(self.y_coordinate_tumor / 100.0) * self.square_size + self.square_size/2 + img.shape[0]/2)
        top_left = (x_center_tumor - self.square_size // 2, y_center_tumor - self.square_size // 2)
        bottom_right = (x_center_tumor + self.square_size // 2, y_center_tumor + self.square_size // 2)
        cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
        return x_center_tumor, y_center_tumor

    def mouse_callback(self, event, x, y, flags, param):
        """cv2 mouse callback: record the current cursor position."""
        if event == cv2.EVENT_MOUSEMOVE:
            self.mouse_x = x
            self.mouse_y = y
# ---------------------------------------------------------------------------
# Pipeline definition: color camera -> FeatureTracker (features + passthrough
# frame to host), mono pair -> StereoDepth -> depth stream to host.
# ---------------------------------------------------------------------------
pipeline = dai.Pipeline()

colorCam = pipeline.create(dai.node.ColorCamera)
featureTrackerColor = pipeline.create(dai.node.FeatureTracker)
left = pipeline.create(dai.node.MonoCamera)
right = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)

xoutPassthroughFrameColor = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesColor = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)
xoutDepth = pipeline.create(dai.node.XLinkOut)

xoutPassthroughFrameColor.setStreamName("passthroughFrameColor")
xoutTrackedFeaturesColor.setStreamName("trackedFeaturesColor")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")
xoutDepth.setStreamName("depth")

# Color camera feeds the feature tracker (1080p ISP scaled by 2/3).
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
colorCam.setIspScale(2, 3)
colorCam.video.link(featureTrackerColor.inputImage)
colorCam.setInterleaved(False)

# FIX: the mono cameras were never bound to board sockets. Bind them
# explicitly to the left (CAM_B) and right (CAM_C) sockets, as the depthai
# stereo examples do, so StereoDepth cannot be mis-wired on any device.
left.setBoardSocket(dai.CameraBoardSocket.CAM_B)
right.setBoardSocket(dai.CameraBoardSocket.CAM_C)
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)

stereo.setLeftRightCheck(True)
# Align the depth map to the color camera socket so depthFrame can be
# indexed with feature positions taken from the color frame.
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)

featureTrackerColor.passthroughInputImage.link(xoutPassthroughFrameColor.input)
featureTrackerColor.outputFeatures.link(xoutTrackedFeaturesColor.input)
xinTrackedFeaturesConfig.out.link(featureTrackerColor.inputConfig)
left.out.link(stereo.left)
right.out.link(stereo.right)
stereo.depth.link(xoutDepth.input)

# Give the feature tracker two shave cores and two memory slices.
featureTrackerColor.setHardwareResources(2, 2)
# Snapshot of the tracker config; mutated and re-sent by the 's' key below.
featureTrackerConfig = featureTrackerColor.initialConfig.get()
with dai.Device(pipeline) as device:
    # Host-side queues (maxSize=8, blocking=False: oldest packets dropped).
    passthroughImageColorQueue = device.getOutputQueue("passthroughFrameColor", 8, False)
    outputFeaturesColorQueue = device.getOutputQueue("trackedFeaturesColor", 8, False)
    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")
    depthQueue = device.getOutputQueue("depth", 8, False)

    colorWindowName = "color"
    colorFeatureDrawer = FeatureTrackerDrawer(colorWindowName)

    # FIX: tear the OpenCV windows down even when a queue read raises
    # (e.g. the X_LINK_ERROR seen on device disconnect); previously
    # cv2.destroyAllWindows() only ran after a clean 'q' exit.
    try:
        while True:
            # Blocking reads: one color frame, its tracked features, and a
            # depth frame per iteration.
            inPassthroughFrameColor = passthroughImageColorQueue.get()
            passthroughFrameColor = inPassthroughFrameColor.getCvFrame()
            colorFrame = passthroughFrameColor
            trackedFeaturesColor = outputFeaturesColorQueue.get().trackedFeatures
            depthFrame = depthQueue.get().getFrame()

            center = (colorFrame.shape[1] // 2, colorFrame.shape[0] // 2)
            colorFeatureDrawer.drawFeatures(colorFrame, trackedFeaturesColor, center, depthFrame)
            colorFeatureDrawer.drawCenterCross(colorFrame, center)
            cv2.imshow(colorWindowName, colorFrame)

            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            elif key == ord('s'):
                # Toggle the motion estimator type and push the new config
                # to the device.
                if featureTrackerConfig.motionEstimator.type == dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW:
                    featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.HW_MOTION_ESTIMATION
                    print("Switching to hardware-accelerated motion estimation")
                else:
                    featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW
                    print("Switching to Lucas-Kanade optical flow")
                cfg = dai.FeatureTrackerConfig()
                cfg.set(featureTrackerConfig)
                inputFeatureTrackerConfigQueue.send(cfg)
            elif key == ord('p'):
                # Toggle coordinate-entry mode; clear the buffer on entry.
                colorFeatureDrawer.input_mode = not colorFeatureDrawer.input_mode
                if colorFeatureDrawer.input_mode:
                    colorFeatureDrawer.input_text = ""
            elif colorFeatureDrawer.input_mode:
                # NOTE(review): 'q'/'s'/'p' are intercepted above and so can
                # never be typed into the buffer; the expected "x;y" numeric
                # input is unaffected.
                colorFeatureDrawer.handleInput(key)
            elif key == ord('l'):
                # Toggle square-size entry mode; clear the buffer on entry.
                colorFeatureDrawer.input_mode_square_size = not colorFeatureDrawer.input_mode_square_size
                if colorFeatureDrawer.input_mode_square_size:
                    colorFeatureDrawer.input_text_square_size = ""
            elif colorFeatureDrawer.input_mode_square_size:
                colorFeatureDrawer.handleInput(key)
    finally:
        cv2.destroyAllWindows()