jakaskerl The test code is not large, so I am posting all of it.
currant-yolov8-1107-480.blob - trained on 1600 pictures, file size 5.79 MB.
I also ran a smaller *.blob (600 pictures, ~4 MB file size), but the problem is the same.
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
# Path to the model blob (the path is absolute, so the join with the script's parent has no effect)
nnBlobPath = str((Path(__file__).parent / Path('/home/pi/Desktop/WEB_weeder/weights/currant_08052024_server.blob')).resolve().absolute())
if not Path(nnBlobPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
# Label texts
labelMap = ["currant"] # Assuming there is only one class
syncNN = True
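# Note: syncNN is defined here, but the sync/async linking branch further down is commented out, so the flag is currently unused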
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutNN = pipeline.create(dai.node.XLinkOut)
#nnNetworkOut = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
#nnNetworkOut.setStreamName("nnNetwork")
# Properties
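# Note: the preview size should match the input resolution the blob was exported for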
camRgb.setPreviewSize(448,448)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_720_P)
#camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)  # This resolution could be used instead
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
camRgb.setFps(30)
camRgb.setInterleaved(False)
detectionNetwork.setBlobPath(nnBlobPath)
detectionNetwork.setConfidenceThreshold(0.8)
detectionNetwork.input.setBlocking(False)
detectionNetwork.setNumInferenceThreads(2)
# Yolo specific parameters
detectionNetwork.setNumClasses(1)
detectionNetwork.setCoordinateSize(4)
#detectionNetwork.setIouThreshold(0.5)
# Linking
camRgb.preview.link(detectionNetwork.input)
#if syncNN:
detectionNetwork.passthrough.link(xoutRgb.input)
#else:
# camRgb.preview.link(xoutRgb.input)
detectionNetwork.out.link(xoutNN.input)
#detectionNetwork.outNetwork.link(nnNetworkOut.input)
print(1)
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    print(2)
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
    #networkQueue = device.getOutputQueue(name="nnNetwork", maxSize=4, blocking=False)
    startTime = time.monotonic()
    counter = 0
    fps = 0
    color = (0, 150, 255)
    #printOutputLayersOnce = True
    while True:
        print("loop")
        inPreview = previewQueue.get()
        inDet = detectionNNQueue.get()
        #inNN = networkQueue.get()
        #if printOutputLayersOnce:
        #    toPrint = 'Output layer names:'
        #    for ten in inNN.getAllLayerNames():
        #        toPrint = f'{toPrint} {ten},'
        #    print(toPrint)
        #    printOutputLayersOnce = False
        frame = inPreview.getCvFrame()
        counter += 1
        current_time = time.monotonic()
        if (current_time - startTime) > 1:
            fps = counter / (current_time - startTime)
            counter = 0
            startTime = current_time
        detections = inDet.detections
        # If the frame is available, draw bounding boxes on it and show the frame
        height, width = frame.shape[:2]
        for detection in detections:
            # Denormalize bounding box
            x1 = int(detection.xmin * width)
            x2 = int(detection.xmax * width)
            y1 = int(detection.ymin * height)
            y2 = int(detection.ymax * height)
            try:
                label = labelMap[detection.label]
            except IndexError:
                label = f'Label {detection.label}'
            cv2.putText(frame, label, (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"{detection.confidence*100:.2f}%", (x1 + 10, y1 + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
        cv2.putText(frame, f"NN fps: {fps:.2f}", (2, height - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
        cv2.imshow("rgb", frame)
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()
    time.sleep(0.01)