jakaskerl
I can't seem to upload a file, so below is the entire script:
import utils
import config
import os
import numpy as np
import cv2 as cv
import depthai as dai
from imutils.video import FPS
from pathlib import Path
def create_yolo_pipeline():
    """Build a DepthAI pipeline: ColorCamera -> YoloDetectionNetwork.

    Exposes two XLinkOut streams: "rgb" (camera preview frames) and
    "nn" (YOLO detection results).

    Returns:
        dai.Pipeline: the configured pipeline, ready to pass to dai.Device().
    """
    pipeline = dai.Pipeline()

    # Define sources and outputs
    cam = pipeline.create(dai.node.ColorCamera)
    nn = pipeline.create(dai.node.YoloDetectionNetwork)
    xOut = pipeline.create(dai.node.XLinkOut)
    nnOut = pipeline.create(dai.node.XLinkOut)
    xOut.setStreamName("rgb")
    nnOut.setStreamName("nn")

    # Camera properties — preview size must match the network input (640x640)
    cam.setPreviewSize(640, 640)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_720_P)
    cam.setInterleaved(False)
    cam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
    cam.setFps(5)

    # Network settings
    nn.setConfidenceThreshold(0.5)
    nn.setNumClasses(80)
    nn.setCoordinateSize(4)
    # BUG FIX: only 3 anchor (w, h) pairs — 6 values — are supplied below, so
    # the valid mask indices are 0-2. The original mask [6, 7, 8] indexed past
    # the anchor list, which breaks detection decoding on-device.
    # NOTE(review): the mask key must match the model's output grid size
    # (for a 640 input and stride 32 that would be 640/32 = 20 -> "side20");
    # confirm against the exported ONNX output shapes.
    nn.setAnchorMasks({
        "side7": [0, 1, 2]  # anchors for detecting large objects
    })
    nn.setAnchors([
        142, 110, 192, 243, 459, 401  # (w, h) pairs for the masked side
    ])
    nn.setIouThreshold(0.5)
    nn.setBlobPath(str(Path(config.cont_clss_blob).resolve().absolute()))
    nn.setNumInferenceThreads(2)
    nn.input.setBlocking(False)
    xOut.input.setBlocking(False)
    xOut.input.setQueueSize(1)

    # Linking: the camera preview feeds both the network and the host stream
    cam.preview.link(nn.input)
    cam.preview.link(xOut.input)
    # nn.passthrough.link(xOut.input)  # alternative: show the exact NN input
    nn.out.link(nnOut.input)
    return pipeline
def stream(pipeline):
    """Run *pipeline* on a connected device and display annotated frames.

    Polls the "rgb" and "nn" output queues, draws the latest detections on
    the latest frame via display(), and exits when 'q' is pressed.

    Args:
        pipeline: a dai.Pipeline, e.g. from create_yolo_pipeline().
    """
    with dai.Device(pipeline) as device:
        q_rgb = device.getOutputQueue(name="rgb", maxSize=1, blocking=False)
        q_nn = device.getOutputQueue(name="nn", maxSize=1, blocking=False)

        # Latest frame / detections seen so far (queues are polled non-blocking,
        # so either may lag behind the other).
        frame = None
        detections = []

        print("[INFO] Stream started")
        while True:  # idiomatic `while True` instead of `while 1`
            inRgb = q_rgb.tryGet()
            inDet = q_nn.tryGet()
            if inDet is not None:
                detections = inDet.detections
            if inRgb is not None:
                # (removed leftover debug print('here'))
                frame = inRgb.getCvFrame()
            if frame is not None:
                display("rgb", frame, detections)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
        print("[INFO] Closing Stream")
        cv.destroyAllWindows()
def display(name, frame, detections):
    """Draw detection labels, confidences and boxes on *frame*, then show it.

    Args:
        name: OpenCV window name.
        frame: BGR image (mutated in place).
        detections: iterable of DepthAI detections with normalized
            xmin/ymin/xmax/ymax, a label and a confidence.
    """
    colour = (255, 0, 0)
    font = cv.FONT_HERSHEY_TRIPLEX
    for detection in detections:
        bbox = frameNorm(frame,
            (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
        # BUG FIX: original wrote bbox(1) — a call on a numpy array — instead
        # of bbox[1], raising TypeError on the first detection drawn.
        cv.putText(frame,
                   f"{detection.label}",
                   (bbox[0] + 10, bbox[1] + 20),
                   font,
                   0.5,
                   255)
        cv.putText(frame,
                   f"{int(detection.confidence * 100)}%",
                   (bbox[0] + 10, bbox[1] + 40),
                   font,
                   0.5,
                   255)
        cv.rectangle(frame,
                     (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                     colour,
                     2)
    cv.imshow(name, frame)
def frameNorm(frame, bbox):
    """Convert normalized [0, 1] bbox coordinates to pixel coordinates.

    Args:
        frame: image array; shape[0] is height, shape[1] is width.
        bbox: flat sequence of normalized coords ordered (x, y, x, y, ...).

    Returns:
        np.ndarray of ints: the coords scaled to the frame's dimensions,
        clipped to the [0, 1] range first.
    """
    height = frame.shape[0]
    width = frame.shape[1]
    scale = np.full(len(bbox), height)
    scale[::2] = width  # even positions are x coordinates -> scale by width
    clipped = np.clip(np.array(bbox), 0, 1)
    return (clipped * scale).astype(int)
if __name__ == "__main__":
#Setting up the pipeline
print("[INFO] Creating pipeline...")
pipeline = create_yolo_pipeline()
stream(pipeline)
cv.destroyAllWindows()
To make the blob file I used the online blob converter (ONNX to blob) with the following parameters:
--data_type=FP16 --mean_values=[123.675,116.28,103.53] --scale_values=[58.395,57.12,57.375] --input_shape=[1,640,640,3]
(Note: the stray comma that originally followed --scale_values has been removed — it would be passed as part of the argument. Also verify the input layout: many exported models expect NCHW, i.e. --input_shape=[1,3,640,640].)