Hi @jakaskerl,
I am getting the following error for many of the DepthAI pipeline scripts I try to generate a pipeline graph for.
Pipeline schema retrieved
Program exited.
0. MonoCamera.out (16) -> StereoDepth.left (24)
1. MonoCamera.out (20) -> StereoDepth.right (25)
2. ColorCamera.preview (5) -> SpatialDetectionNetwork.in (9)
3. StereoDepth.depth (27) -> SpatialDetectionNetwork.inputDepth (10)
4. SpatialDetectionNetwork.passthrough (12) -> XLinkOut.in (40)
5. SpatialDetectionNetwork.out (11) -> XLinkOut.in (41)
6. SpatialDetectionNetwork.passthroughDepth (14) -> XLinkOut.in (42)
Traceback (most recent call last):
File "C:\Users\Yishu\anaconda3\envs\emvs_test\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\Yishu\anaconda3\envs\emvs_test\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Users\Yishu\anaconda3\envs\emvs_test\Scripts\pipeline_graph.exe\__main__.py", line 7, in <module>
File "C:\Users\Yishu\anaconda3\envs\emvs_test\lib\site-packages\depthai_pipeline_graph\pipeline_graph.py", line 375, in main
p.cmd_tool(args)
File "C:\Users\Yishu\anaconda3\envs\emvs_test\lib\site-packages\depthai_pipeline_graph\pipeline_graph.py", line 221, in cmd_tool
self.create_graph(schema)
File "C:\Users\Yishu\anaconda3\envs\emvs_test\lib\site-packages\depthai_pipeline_graph\pipeline_graph.py", line 301, in create_graph
src_port = [p for p in self.ports if p.find_node(src_node_id, src_group, src_name)][0]
IndexError: list index out of range
Following are the packages, with their respective versions, being used to run the DepthAI pipeline graph tool:
depthai 2.25.0.0
depthai-pipeline-graph 0.0.5
PySide2 5.15.2.1
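For reference, I am invoking the tool along these lines (the script name here is just a placeholder):

pipeline_graph run "python my_pipeline_script.py"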
Below is the script for which I am trying to generate the pipeline graph and getting the above error.
#!/usr/bin/env python3
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
import json
'''
Spatial Tiny-yolo example
Performs inference on RGB camera and retrieves spatial location coordinates: x,y,z relative to the center of depth map.
Can be used for tiny-yolo-v3 or tiny-yolo-v4 networks
'''
# Initial delay to allow the PoE cameras to reset
print("Delay Started")
time.sleep(17)
print("Delay Completed")
# Model blob and config file paths
nnBlobPath = "path to model blob file"
configPath = "path to model config file"
#str((Path(__file__).parent / Path('../models/yolo-v4-tiny-tf_openvino_2021.4_6shave.blob')).resolve().absolute())
if not Path(nnBlobPath).exists():
    import sys
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
with open(configPath) as f:
    config = json.load(f)
nnConfig = config.get("nn_config", {})
# parse input shape
if "input_size" in nnConfig:
    W, H = tuple(map(int, nnConfig.get("input_size").split('x')))
# extract metadata
metadata = nnConfig.get("NN_specific_metadata", {})
classes = metadata.get("classes", {})
coordinates = metadata.get("coordinates", {})
anchors = metadata.get("anchors", {})
anchorMasks = metadata.get("anchor_masks", {})
iouThreshold = metadata.get("iou_threshold", {})
confidenceThreshold = metadata.get("confidence_threshold", {})
# Tiny yolo v3/4 label texts
# parse labels
nnMappings = config.get("mappings", {})
labelMap = nnMappings.get("labels", {})
print("Labels: ", labelMap)
syncNN = True
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
spatialDetectionNetwork = pipeline.create(dai.node.YoloSpatialDetectionNetwork)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
nnNetworkOut = pipeline.create(dai.node.XLinkOut)
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutNN = pipeline.create(dai.node.XLinkOut)
xoutDepth = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
xoutDepth.setStreamName("depth")
nnNetworkOut.setStreamName("nnNetwork")
# Properties
camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")
# setting node configs
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Align depth map to the perspective of RGB camera, on which inference is done
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
stereo.setSubpixel(True)
spatialDetectionNetwork.setBlobPath(nnBlobPath)
spatialDetectionNetwork.setConfidenceThreshold(confidenceThreshold)
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(5000)
# Yolo specific parameters
spatialDetectionNetwork.setNumClasses(classes)
spatialDetectionNetwork.setCoordinateSize(coordinates)
spatialDetectionNetwork.setAnchors(anchors)
spatialDetectionNetwork.setAnchorMasks(anchorMasks)
spatialDetectionNetwork.setIouThreshold(iouThreshold)
# Linking
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
camRgb.preview.link(spatialDetectionNetwork.input)
if syncNN:
    spatialDetectionNetwork.passthrough.link(xoutRgb.input)
else:
    camRgb.preview.link(xoutRgb.input)
spatialDetectionNetwork.out.link(xoutNN.input)
stereo.depth.link(spatialDetectionNetwork.inputDepth)
spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input)
spatialDetectionNetwork.outNetwork.link(nnNetworkOut.input)
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
    depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
    networkQueue = device.getOutputQueue(name="nnNetwork", maxSize=4, blocking=False)
    startTime = time.monotonic()
    counter = 0
    fps = 0
    color = (255, 255, 255)
    printOutputLayersOnce = True
    while True:
        inPreview = previewQueue.get()
        inDet = detectionNNQueue.get()
        depth = depthQueue.get()
        inNN = networkQueue.get()
        if printOutputLayersOnce:
            toPrint = 'Output layer names:'
            for ten in inNN.getAllLayerNames():
                toPrint = f'{toPrint} {ten},'
            print(toPrint)
            printOutputLayersOnce = False
        frame = inPreview.getCvFrame()
        depthFrame = depth.getFrame()  # depthFrame values are in millimeters
        depth_downscaled = depthFrame[::4]
        if np.all(depth_downscaled == 0):
            min_depth = 0  # Set a default minimum depth value when all elements are zero
        else:
            min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
        max_depth = np.percentile(depth_downscaled, 99)
        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET)
        counter += 1
        current_time = time.monotonic()
        if (current_time - startTime) > 1:
            fps = counter / (current_time - startTime)
            counter = 0
            startTime = current_time
        detections = inDet.detections
        # If the frame is available, draw bounding boxes on it and show the frame
        height = frame.shape[0]
        width = frame.shape[1]
        color_spool = (255, 0, 0)
        color_person = (0, 255, 0)
        color_text = (45, 255, 255)
        for detection in detections:
            roiData = detection.boundingBoxMapping
            roi = roiData.roi
            roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])
            topLeft = roi.topLeft()
            bottomRight = roi.bottomRight()
            xmin = int(topLeft.x)
            ymin = int(topLeft.y)
            xmax = int(bottomRight.x)
            ymax = int(bottomRight.y)
            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)
            # Denormalize bounding box
            x1 = int(detection.xmin * width)
            x2 = int(detection.xmax * width)
            y1 = int(detection.ymin * height)
            y2 = int(detection.ymax * height)
            try:
                label = labelMap[detection.label]
            except (KeyError, IndexError):
                label = detection.label
            if label == 'spool':
                color_det = color_spool
            else:
                color_det = color_person
            cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color_text)
            cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color_text)
            cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color_text)
            cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color_text)
            cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color_text)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color_det, 1)  # draw the detection box in its per-class color
cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)
cv2.namedWindow("depth", cv2.WINDOW_NORMAL)
cv2.imshow("depth", depthFrameColor)
cv2.namedWindow("rgb", cv2.WINDOW_NORMAL)
cv2.imshow("rgb", frame)
if cv2.waitKey(1) == ord('q'):
break
It looks like the list comprehension in the src_port assignment returns an empty list, so indexing its 0th element raises the index-out-of-range error. This tool is really useful for debugging, but I haven't been able to get past this error to generate the pipeline graph. I hope you can help me resolve it.
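In case it helps narrow things down, here is a minimal sketch of the kind of guard I have in mind around that lookup in create_graph. This is just an illustration, not a verified fix; the names are taken from the traceback, and the continue assumes the lookup runs inside a loop over the schema's connections:

# Hypothetical guard around pipeline_graph.py line 301
matches = [p for p in self.ports if p.find_node(src_node_id, src_group, src_name)]
if not matches:
    # No port matched this connection's source; report and skip instead of crashing
    print(f"Warning: no source port found for node id {src_node_id}, name {src_name}")
    continue  # assumes this runs inside the per-connection loop
src_port = matches[0]

Something like that would at least let the rest of the graph render while showing which connection the tool cannot resolve.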
Thanks & Regards
Yishu