• YOLOv8 inference keeps crashing

I'm running YOLOv8 inference on the OAK-D. It works perfectly for a while, then it crashes with the specific error below. Any tips or guidance in the right direction would be appreciated.

```
[1844301051C56F0E00] [3.1] [46.510] [ColorCamera(0)] [error] RGB postprocessing timeout!
[1844301051C56F0E00] [3.1] [46.510] [ColorCamera(0)] [error] RGB postprocessing error, rescheduling!
<depthai.ImgDetection object at 0x7f61987d30>
<depthai.ImgDetection object at 0x7f5d5d2b30>
[1844301051C56F0E00] [3.1] [54.442] [ColorCamera(0)] [error] RGB postprocessing timeout!
[1844301051C56F0E00] [3.1] [54.442] [ColorCamera(0)] [error] RGB postprocessing error, rescheduling!
<depthai.ImgDetection object at 0x7f61987d30>
[1844301051C56F0E00] [3.1] [68.203] [system] [critical] Fatal error. Please report to developers. Log: 'pool' '123'
[1844301051C56F0E00] [3.1] [1719328558.308] [host] [warning] Device crashed, but no crash dump could be extracted.
Traceback (most recent call last):
  File "main_api_2.py", line 130, in <module>
    inRgb = qRgb.tryGet()
RuntimeError: Communication exception - possible device error/misconfiguration. Original message 'Couldn't read data from stream: 'rgb' (X_LINK_ERROR)'
```
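Since the host warns that no crash dump could be extracted over the live connection, it may be worth reconnecting after the crash and asking the device for a stored dump to attach to a bug report. A minimal sketch, assuming a recent depthai 2.x release that exposes `Device.hasCrashDump()` and `Device.getCrashDump()`:

```python
import depthai as dai

# Sketch: reconnect to the first available device after a crash and check
# whether the firmware kept a crash dump (supported in recent depthai 2.x).
with dai.Device() as device:
    if device.hasCrashDump():
        crash_dump = device.getCrashDump()
        # Serialize the dump so it can be saved and attached to a report
        # for the Luxonis developers.
        print(crash_dump.serializeToJson())
    else:
        print("No crash dump stored on the device.")
```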

erik Hey erik, thanks for the reply. To be honest, I'm just trying to run this sample from GitHub, and I also tried a different model, YOLOv8n, which I downloaded from the YOLO repo and then converted using the Luxonis tools.

depthai-experiments/gen2-yolo/device-decoding/main_api.py at master · luxonis/depthai-experiments (github.com)
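For context, the conversion step before the Luxonis tools usually goes through ONNX. A hedged sketch of that export, assuming the `ultralytics` package and the pretrained `yolov8n.pt` weights (tools.luxonis.com or blobconverter then compiles the result into a `.blob`; exact flags depend on your setup):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")               # pretrained nano weights from the YOLO repo
model.export(format="onnx", imgsz=416)   # should match the input_size in yolov8n.json
```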

```python
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", help="Provide model name or model path for inference",
                    default='model/yolov8n_openvino_2022.1_6shave.blob', type=str)
parser.add_argument("-c", "--config", help="Provide config path for inference",
                    default='json/yolov8n.json', type=str)
```

The error seems to be coming from here, and it happens when using `tryGet()` as well:

```python
while True:
    inRgb = qRgb.get()
    inDet = qDet.get()
```
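Since the crash surfaces on the host as a `RuntimeError` raised by the queue read, one way to fail gracefully instead of getting an unhandled traceback is to wrap the reads. A minimal sketch; the log-and-break policy is an assumption, not part of the original sample:

```python
while True:
    try:
        inRgb = qRgb.tryGet()   # non-blocking: returns None if nothing is queued
        inDet = qDet.tryGet()
    except RuntimeError as e:
        # X_LINK_ERROR and similar mean the device side died; the queue cannot
        # recover, so report it and leave the loop cleanly.
        print(f"Device communication failed: {e}")
        break
```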

erik

Apologies, I tried to have it in one snippet for the MRE, but it seems to be formatting weirdly. I hope this will do; let me know if there are any issues.

```python
#!/usr/bin/env python3

"""
The code is edited from docs (https://docs.luxonis.com/projects/api/en/latest/samples/Yolo/tiny_yolo/)
We add parsing from JSON files that contain configuration
"""

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
import argparse
import json
import blobconverter

# yolov4_tiny_coco_416x416
# yolov7_openvino_2022.1_6shave
# yolov7.json

# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", help="Provide model name or model path for inference",
                    default='model/yolov8n_openvino_2022.1_6shave.blob', type=str)
parser.add_argument("-c", "--config", help="Provide config path for inference",
                    default='model/yolov8n.json', type=str)
args = parser.parse_args()

# parse config
configPath = Path(args.config)
if not configPath.exists():
    raise ValueError("Path {} does not exist!".format(configPath))

with configPath.open() as f:
    config = json.load(f)
nnConfig = config.get("nn_config", {})

# parse input shape
if "input_size" in nnConfig:
    W, H = tuple(map(int, nnConfig.get("input_size").split('x')))

# extract metadata
metadata = nnConfig.get("NN_specific_metadata", {})
classes = metadata.get("classes", {})
coordinates = metadata.get("coordinates", {})
anchors = metadata.get("anchors", {})
anchorMasks = metadata.get("anchor_masks", {})
iouThreshold = metadata.get("iou_threshold", {})
confidenceThreshold = metadata.get("confidence_threshold", {})

print(metadata)

# parse labels
nnMappings = config.get("mappings", {})
labels = nnMappings.get("labels", {})

# get model path
nnPath = args.model
if not Path(nnPath).exists():
    print("No blob found at {}. Looking into DepthAI model zoo.".format(nnPath))
    nnPath = str(blobconverter.from_zoo(args.model, shaves=6, zoo_type="depthai", use_cache=True))

# sync outputs
syncNN = True

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("rgb")
nnOut.setStreamName("nn")

# Properties
camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
camRgb.setFps(40)

# Network specific settings
detectionNetwork.setConfidenceThreshold(confidenceThreshold)
detectionNetwork.setNumClasses(classes)
detectionNetwork.setCoordinateSize(coordinates)
detectionNetwork.setAnchors(anchors)
detectionNetwork.setAnchorMasks(anchorMasks)
detectionNetwork.setIouThreshold(iouThreshold)
detectionNetwork.setBlobPath(nnPath)
detectionNetwork.setNumInferenceThreads(2)
detectionNetwork.input.setBlocking(False)

# Linking
camRgb.preview.link(detectionNetwork.input)
detectionNetwork.passthrough.link(xoutRgb.input)
detectionNetwork.out.link(nnOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    frame = None
    detections = []
    startTime = time.monotonic()
    counter = 0
    color2 = (255, 255, 255)

    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def displayFrame(name, frame, detections):
        color = (255, 0, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labels[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
            print(detection)
        # Show the frame
        cv2.imshow(name, frame)

    while True:
        inRgb = qRgb.tryGet()
        inDet = qDet.tryGet()

        if inRgb is not None:
            frame = inRgb.getCvFrame()
            cv2.putText(frame, "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)),
                        (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color2)

        if inDet is not None:
            detections = inDet.detections
            counter += 1

        if frame is not None:
            displayFrame("rgb", frame, detections)

        if cv2.waitKey(1) == ord('q'):
            break
```
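For reference, the script above expects the config JSON to follow the layout produced by the Luxonis export tools (keys `nn_config`, `NN_specific_metadata`, and `mappings`). A minimal illustrative sketch of that structure; the threshold values shown are typical COCO defaults rather than the actual contents of yolov8n.json, and the label list is truncated:

```json
{
  "nn_config": {
    "input_size": "416x416",
    "NN_specific_metadata": {
      "classes": 80,
      "coordinates": 4,
      "anchors": [],
      "anchor_masks": {},
      "iou_threshold": 0.5,
      "confidence_threshold": 0.5
    }
  },
  "mappings": {
    "labels": ["person", "bicycle", "car"]
  }
}
```

Note that YOLOv8 is anchor-free, so `anchors` and `anchor_masks` stay empty, unlike the YOLOv4-tiny configs from the same repo.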

Hi ibrahima15,

Can't repro this because I don't have your model. That said, I tested with the yolov8n_coco_416x416 model and experienced no issues.

Check your model again.

Thanks,
Jaka