- Edited
Hi!
I've got a Yolov5 model that's not converting across on the DepthAI Tools website.
I had a Yolov8 model convert successfully, but the Yolov5 one gives me an error telling me to discuss it here or post an issue on GitHub.
I can email the model if need be^.^
Hi!
I've got a Yolov5 model that's not converting across on the DepthAI Tools website.
I had a Yolov8 model convert successfully, but it gives me the error to discuss here or post an issue on Github.
I can email the model if need be^.^
Just an error to discuss on these forums. How do I upload the model?
Also, using my Yolov8 model, I can't seem to object track. This is my program so far for that:
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
# Get YOLOv8 model blob file path
nnPath = "16022025.blob"
# Fail fast with a clear message if the compiled blob is missing.
if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# YOLOv8 label map — single-class model
labelMap = ["sheep"]

# When True, host display is driven by the tracker passthrough frame (synced);
# when False, frames come straight from the camera preview.
syncNN = True

# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
objectTracker = pipeline.create(dai.node.ObjectTracker)

# XLinkOut nodes carry device data back to the host; each stream gets a
# unique name that the host-side queues look up later.
xoutRgb = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)
trackOut = pipeline.create(dai.node.XLinkOut)
for outNode, streamName in ((xoutRgb, "rgb"), (nnOut, "nn"), (trackOut, "track")):
    outNode.setStreamName(streamName)
# Camera properties
# Preview size must match the network's expected input resolution (640x640 here).
camRgb.setPreviewSize(640, 640)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
camRgb.setFps(40)
# YOLOv8 network settings
detectionNetwork.setConfidenceThreshold(0.3)  # discard detections below 30% confidence
detectionNetwork.setNumClasses(1)  # single-class model ("sheep")
detectionNetwork.setCoordinateSize(4)  # box encoded as 4 coords (xmin, ymin, xmax, ymax)
detectionNetwork.setIouThreshold(0.5)  # NMS overlap threshold
detectionNetwork.setBlobPath(nnPath)
detectionNetwork.setNumInferenceThreads(2)
# NOTE(review): blocking NN input can stall the camera at 40 fps — confirm this is intended.
detectionNetwork.input.setBlocking(True)
# Object Tracker settings
objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
objectTracker.setDetectionLabelsToTrack([0])  # Track class "sheep" (label index 0)
objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID)
# Linking
camRgb.preview.link(detectionNetwork.input)

# Detections feed the tracker; the NN passthrough frame is used as the tracker
# frame so detections and frames arrive in lockstep.
detectionNetwork.out.link(objectTracker.inputDetections)
detectionNetwork.passthrough.link(objectTracker.inputTrackerFrame)
# BUG FIX: the original ALSO linked camRgb.preview to inputTrackerFrame,
# double-linking the same tracker input — that second link is removed.

# BUG FIX: the "nn" stream was created but never fed, so qDet on the host
# always returned None (empty detections, fps counter stuck at 0).
detectionNetwork.out.link(nnOut.input)

objectTracker.out.link(trackOut.input)
if syncNN:
    objectTracker.passthroughTrackerFrame.link(xoutRgb.input)
else:
    camRgb.preview.link(xoutRgb.input)
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Host-side output queues; names must match the XLinkOut stream names above.
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    qTrack = device.getOutputQueue(name="track", maxSize=4, blocking=False)

    frame = None
    detections = []
    tracks = []
    startTime = time.monotonic()
    counter = 0  # detection packets received, used for the fps overlay
    color2 = (255, 255, 255)

    def frameNorm(frame, bbox):
        """Scale normalized [0..1] bbox coords to integer pixel coords of `frame`."""
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]  # even indices are x coords -> scale by width
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def displayFrame(name, frame):
        """Draw current detections (green) and tracklets (red) on `frame`, then show it."""
        color = (0, 255, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labelMap[0], (bbox[0] + 10, bbox[1] + 20),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, color2)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        for track in tracks:
            # NOTE(review): depthai Tracklet normally exposes its box via
            # `track.roi` (a Rect), not xmin/xmax attributes — verify these
            # attribute names against your depthai version.
            bbox = frameNorm(frame, (track.xmin, track.ymin, track.xmax, track.ymax))
            cv2.putText(frame, f"ID: {track.id}", (bbox[0] + 10, bbox[1] + 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
        cv2.imshow(name, frame)

    while True:
        # Blocking get keeps the display in lockstep with the tracker when syncNN.
        inRgb = qRgb.get() if syncNN else qRgb.tryGet()
        inDet = qDet.tryGet()
        inTrack = qTrack.tryGet()

        if inRgb is not None:
            frame = inRgb.getCvFrame()
            cv2.putText(frame, "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)),
                        (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color2)

        if inDet is not None:
            detections = inDet.detections
            counter += 1

        if inTrack is not None:
            tracks = inTrack.tracklets

        if frame is not None:
            displayFrame("rgb", frame)

        if cv2.waitKey(1) == ord('q'):
            break
I can object detect fine, but when I try to implement object tracking, there is no output and the program just keeps running. I'm assuming my implementation is wrong, but I'm not too sure where the problem is or how to fix it.
Thanks in advance!!