Hi,
I'm trying to run a Pipeline with a MobileNetDetectionNetwork using my YOLOv5 .onnx model. I'm running into the following error:
"[14442C10815E90D000] [1.2.3.1] [1.054] [DetectionNetwork(3)] [error] Input tensor 'input' (0) exceeds available data range. Data size (143360B), tensor offset (0), size (430080B) - skipping inference"
I'm using an OAK-D device with depthai-core v2.25.1.
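For what it's worth, the two byte counts in the error seem to line up with a single-channel vs. a three-channel 448x320 frame. Here is my own back-of-the-envelope check (the variable names are just for illustration):

# Sizes reported in the error message:
gray8_bytes = 448 * 320      # 143360 -> "Data size (143360B)", one byte per pixel
bgr_bytes = 448 * 320 * 3    # 430080 -> "size (430080B)" expected by the input tensor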
Here is the code I'm running:
import blobconverter
import cv2
import depthai as dai
VIDEO_SIZE = (1072, 1072)
VIDEO_FPS = 30
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_UNIVERSAL)
openvino_version = '2024.0.2'
def create_output(name: str, output: dai.Node.Output):
    # Helper: create an XLinkOut node and link the given output to it
    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName(name)
    output.link(xout.input)
cam = pipeline.create(dai.node.ColorCamera)
cam.setPreviewSize(VIDEO_SIZE)
cam.setVideoSize(VIDEO_SIZE)
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setInterleaved(False)
cam.setPreviewNumFramesPool(VIDEO_FPS)
cam.setFps(VIDEO_FPS)
cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
create_output('color', cam.video)
face_det_manip = pipeline.create(dai.node.ImageManip)
face_det_manip.initialConfig.setResize(448, 320)
face_det_manip.initialConfig.setFrameType(dai.ImgFrame.Type.GRAY8)
face_det_manip.setMaxOutputFrameSize(448*320)
face_det_manip.setNumFramesPool(VIDEO_FPS)
cam.preview.link(face_det_manip.inputImage)
print("Creating Face Detection Neural Network...")
face_det_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
face_det_nn.setConfidenceThreshold(0.5)
face_det_nn.setBlobPath(blobconverter.from_onnx(
    model=".\\yolov5n-320x448.onnx",
    data_type="FP16",
    shaves=6,
))
face_det_manip.out.link(face_det_nn.input)
create_output('detection', face_det_nn.out)
with dai.Device(pipeline) as device:
    queues = {}
    for q_name in ["color", "detection"]:
        queues[q_name] = device.getOutputQueue(q_name)

    while True:
        for q_name, q in queues.items():
            if q.has():
                msg = q.get()
                if q_name == "detection":
                    print("blah")
                if q_name == "color":
                    cv2.imshow("video", msg.getCvFrame())
                else:
                    print({q_name})
        if cv2.waitKey(1) == ord('q'):
            break
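
In case it helps, this is roughly how I'd double-check what input shape the converted blob actually declares. It's only a sketch based on the depthai-python examples; I'm assuming dai.OpenVINO.Blob exposes networkInputs with a dims field, and blob_path here is just a placeholder for the path returned by blobconverter.from_onnx:

# Sketch: print the blob's declared input tensors and their dimensions
blob_path = blobconverter.from_onnx(model=".\\yolov5n-320x448.onnx", data_type="FP16", shaves=6)
blob = dai.OpenVINO.Blob(blob_path)
for name, tensor_info in blob.networkInputs.items():
    print(name, tensor_info.dims)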

What may be causing this error?