# Module-level configuration for the detection pipeline.
# BUG FIX: `file` is an undefined name — the module's own path is the
# dunder `__file__`, so resolving the blob path relative to this script
# raised NameError before.
parentDir = Path(__file__).parent  # directory containing this script
model = 'yolov8n.blob'             # blob filename looked up under ../../data/
customModel = True                 # True -> load the local blob; False -> fetch from OpenVINO Zoo
sizeX = 640                        # neural-network input width
sizeY = 640                        # neural-network input height
def init_pipeline():
    """Build the DepthAI pipeline: 4K RGB camera -> resize -> detection NN.

    Returns:
        depthai.Pipeline with two XLinkOut streams: "rgb" (640x480 preview
        frames for display) and "nn" (detection results).
    """
    pipeline = depthai.Pipeline()

    # Color camera: full 4K sensor, downscaled non-interleaved preview.
    cam_rgb = pipeline.createColorCamera()
    cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_4_K)
    cam_rgb.setPreviewSize(640, 480)
    cam_rgb.setInterleaved(False)

    # Stream the preview frames to the host as "rgb".
    xout_rgb = pipeline.createXLinkOut()
    xout_rgb.setStreamName("rgb")
    cam_rgb.preview.link(xout_rgb.input)

    # Resize the preview to the network's input size (sizeX x sizeY).
    manip1 = pipeline.createImageManip()
    manip1.setMaxOutputFrameSize(1244160)  # must cover sizeX*sizeY*3 bytes (640*640*3 = 1228800)
    manip1.initialConfig.setResize(sizeX, sizeY)
    cam_rgb.preview.link(manip1.inputImage)

    if customModel:
        # BUG FIX: a YOLO blob (yolov8n.blob) must be decoded by a
        # YoloDetectionNetwork node. The previous MobileNetDetectionNetwork
        # node parses the output tensor with the SSD layout and yields
        # garbage/empty detections for YOLO models.
        detection_nn = pipeline.createYoloDetectionNetwork()
        detection_nn.setNumClasses(80)   # assumes COCO-trained yolov8n — adjust for a custom class count
        detection_nn.setCoordinateSize(4)
        detection_nn.setIouThreshold(0.5)
        # NOTE(review): YOLOv8 is anchor-free, so no setAnchors/setAnchorMasks
        # is needed — confirm the blob was exported with DepthAI-compatible
        # (on-device decodable) output layers.
        nnPath = str((parentDir / Path('../../data/' + model)).resolve().absolute())
        detection_nn.setBlobPath(nnPath)
        print("Custom Model: " + nnPath + " Size: " + str(sizeX) + "x" + str(sizeY))
    else:
        # Zoo branch unchanged: person-detection-0106 keeps the original
        # MobileNet-style decoder node.
        detection_nn = pipeline.createMobileNetDetectionNetwork()
        detection_nn.setBlobPath(blobconverter.from_zoo(name='person-detection-0106', shaves=6))
        print("Model from OpenVINO Zoo Size: " + str(sizeX) + "x" + str(sizeY))

    # Drop detections below 50% confidence (valid range <0..1>).
    detection_nn.setConfidenceThreshold(0.5)

    # Feed resized frames into the network and stream detections out as "nn".
    manip1.out.link(detection_nn.input)
    xout_nn = pipeline.createXLinkOut()
    xout_nn.setStreamName("nn")
    detection_nn.out.link(xout_nn.input)
    return pipeline
This is just part of the code; the full file is very large.
Basically, I'm trying to count people inside a zone. Since the use case is CCTV-style footage with people very far away, I'm experimenting with YOLOv8.
And no, I'm not using spatial detections.