self.nnBlobPath = setting.model
self.labelMap = ["person"]
self.syncNN = True
self.pipeline = dai.Pipeline()
self.camRgb = self.pipeline.create(dai.node.ColorCamera)
self.camRgbRotate = self.pipeline.create(dai.node.ColorCamera)  # note: this node is created but never configured or linked anywhere below
self.objectTracker = self.pipeline.create(dai.node.ObjectTracker)
self.spatialDetectionNetwork = self.pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
self.monoLeft = self.pipeline.create(dai.node.MonoCamera)
self.monoRight = self.pipeline.create(dai.node.MonoCamera)
self.stereo = self.pipeline.create(dai.node.StereoDepth)
self.xoutRgb = self.pipeline.create(dai.node.XLinkOut)
self.xoutNN = self.pipeline.create(dai.node.XLinkOut)
self.xoutDepth = self.pipeline.create(dai.node.XLinkOut)
self.objectTracker.setDetectionLabelsToTrack([15]) # track only the "person" class (label 15 in the VOC MobileNet-SSD label map)
#possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF
self.objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
# take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID
self.objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.SMALLEST_ID)
# Setting Stream Name
self.xoutRgb.setStreamName("rgb")
self.xoutNN.setStreamName("detections")
self.xoutDepth.setStreamName("depth")
# Setting Properties
self.camRgb.setPreviewSize(setting.x, setting.y)
self.camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
self.camRgb.setInterleaved(False)
self.camRgb.setFps(40)
self.camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
# Setting stereo resolution
self.monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
self.monoLeft.setCamera("left")
self.monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
self.monoRight.setCamera("right")
self.manipRgb = self.pipeline.create(dai.node.ImageManip)
self.manipRgb.initialConfig.setResize(setting.x, setting.y)
self.manipRgb.initialConfig.setRotationDegrees(180)
self.camRgb.preview.link(self.manipRgb.inputImage)
self.manipRgbOut = self.pipeline.create(dai.node.XLinkOut)
self.manipRgbOut.setStreamName("manip_rgb")
self.manipRgb.out.link(self.manipRgbOut.input)
#self.camRgb.setIspScale(1, 3) # You don't need to downscale (4k -> 720P) video frames
#Squeeze the frame
self.camRgb.setPreviewKeepAspectRatio(False)
# Setting node configs
self.stereo.initialConfig.setConfidenceThreshold(setting.sCT)
# Align depth map to the perspective of RGB camera, on which inference is done
self.stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
self.stereo.setSubpixel(False)
self.stereo.setOutputSize(self.monoLeft.getResolutionWidth(), self.monoLeft.getResolutionHeight())
# Setting parameters for Spatial Detection
self.spatialDetectionNetwork.setBlobPath(self.nnBlobPath)
self.spatialDetectionNetwork.setConfidenceThreshold(setting.CO)
self.spatialDetectionNetwork.input.setBlocking(False)
self.spatialDetectionNetwork.setBoundingBoxScaleFactor(0.9)
self.spatialDetectionNetwork.setDepthLowerThreshold(setting.minDet)
self.spatialDetectionNetwork.setDepthUpperThreshold(setting.maxDet) # maximum detection distance
# Linking
self.monoLeft.out.link(self.stereo.left)
self.monoRight.out.link(self.stereo.right)
self.camRgb.preview.link(self.spatialDetectionNetwork.input)
self.objectTracker.passthroughTrackerFrame.link(self.xoutRgb.input)
self.objectTracker.out.link(self.xoutNN.input)
if self.syncNN:
    self.spatialDetectionNetwork.passthrough.link(self.objectTracker.inputTrackerFrame)
else:
    self.camRgb.preview.link(self.objectTracker.inputTrackerFrame)
self.spatialDetectionNetwork.passthrough.link(self.objectTracker.inputDetectionFrame)
self.spatialDetectionNetwork.out.link(self.objectTracker.inputDetections)
self.stereo.depth.link(self.spatialDetectionNetwork.inputDepth)
# note: nothing is linked into self.xoutDepth in this snippet, so the "depth" stream has no source here
This is the section of my code where I create the pipeline for a camera. Since I connect to multiple cameras at the same time, this section is shared by all of them.
Only afterwards, in a loop that connects to each of the required cameras, do I check whether that camera's image needs to be rotated or not, and I had the queue selection set up like this:
if setting.degreesCams[setting.ipCams.index(deviceInfo.name)] == 1:
    previewQueue = device.getOutputQueue(name="manip_rgb", maxSize=4, blocking=False)
else:
    previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
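For context, that selection sits inside the connection loop over all discovered cameras, roughly like this (a simplified sketch; the discovery call and the activeDevices bookkeeping list are illustrative, not my exact code):

# rough sketch of the connection loop; names like activeDevices are illustrative
activeDevices = []
for deviceInfo in dai.Device.getAllAvailableDevices():
    device = dai.Device(self.pipeline, deviceInfo)  # every camera gets the same shared pipeline
    if setting.degreesCams[setting.ipCams.index(deviceInfo.name)] == 1:
        previewQueue = device.getOutputQueue(name="manip_rgb", maxSize=4, blocking=False)
    else:
        previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
    depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
    activeDevices.append((device, {"preview": previewQueue,
                                   "detections": detectionNNQueue,
                                   "depth": depthQueue}))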
It manages to connect, but after about 5 previews it crashes with no output.
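The queues are then polled every iteration roughly along these lines (another simplified sketch; the activeDevices list and the drawing/tracking handling are illustrative, not my exact code):

# simplified per-frame polling over all connected cameras (illustrative names)
while True:
    for device, queues in activeDevices:
        inPreview = queues["preview"].tryGet()   # rotated or plain RGB, depending on the camera
        inTrack = queues["detections"].tryGet()  # dai.Tracklets from the ObjectTracker
        inDepth = queues["depth"].tryGet()       # "depth" stream output queue
        if inPreview is None or inTrack is None:
            continue
        frame = inPreview.getCvFrame()
        for t in inTrack.tracklets:
            pass  # per-tracklet drawing / spatial coordinate handling goes here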