Hi,

I am trying to get the gaze-estimation application to run standalone on my OAK-D PoE.
Sometimes the code runs fine, other times it stops after roughly 15 seconds, and sometimes it crashes right at the start.
The errors I get are:

[1944301001F1201300] [169.254.1.222] [9.478] [ImageManip(11)] [error] Not possible to create warp params. Error: WARP_SWCH_ERR_CACHE_TO_SMALL

[1944301001F1201300] [169.254.1.222] [9.478] [ImageManip(11)] [error] Invalid configuration or input image - skipping frame

After that I get the error:

[1944301001F1201300] [169.254.1.222] [13.014] [system] [critical] Fatal error. Please report to developers. Log: 'Fatal error on MSS CPU: trap: 2A, address: 800A5ADC' '0'

I have tried to find a solution in both the docs and the GitHub issues, but without success.
The weird thing is that sometimes it works without a problem and other times it does not work at all.

Kind regards
Jonas

Code:

import time
import depthai as dai
import blobconverter


#==========[PIPELINE INIT]==========#

pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_4)
openvino_version = '2021.4'

face_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
face_nn.setBlobPath(blobconverter.from_zoo(name="face-detection-retail-0004", shaves=6, version=openvino_version))
face_nn.setConfidenceThreshold(0.7)

landmark_manip = pipeline.create(dai.node.ImageManip)
landmark_manip.initialConfig.setResize(60, 60)
landmark_manip.inputConfig.setWaitForMessage(True)

land_nn = pipeline.create(dai.node.NeuralNetwork)
land_nn.setBlobPath(blobconverter.from_zoo(name="landmarks-regression-retail-0009", shaves=6, version=openvino_version))

headpose_manip = pipeline.create(dai.node.ImageManip)
headpose_manip.initialConfig.setResize(60, 60)
headpose_manip.inputConfig.setWaitForMessage(True)

pose_nn = pipeline.create(depthai.node.NeuralNetwork)
pose_nn.setBlobPath(blobconverter.from_zoo(name="head-pose-estimation-adas-0001", shaves=6, version=openvino_version))

#=========[CAM SETUP]===========#
VIDEO_SIZE = (1072, 1072)
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(1072, 1072)
camRgb.setVideoSize(VIDEO_SIZE)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

copy_manip = pipeline.create(dai.node.ImageManip)
camRgb.preview.link(copy_manip.inputImage)
copy_manip.setNumFramesPool(20)
copy_manip.setMaxOutputFrameSize(1072*1072*3)
# ImageManip that will crop the frame before sending it to the Face detection NN node
face_det_manip = pipeline.create(dai.node.ImageManip)
face_det_manip.initialConfig.setResize(300, 300)
face_det_manip.setMaxOutputFrameSize(300*300*3)
copy_manip.out.link(face_det_manip.inputImage)


monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
script = pipeline.create(dai.node.Script)
script.setProcessor(dai.ProcessorType.LEON_CSS)
#===========[LINKING]===========#
face_det_manip.out.link(face_nn.input)

copy_manip.out.link(script.inputs['preview'])
# OUTPUT FACE_NN TO SCRIPT
face_nn.out.link(script.inputs["face_nn_output"])
script.inputs["face_nn_output"].setBlocking(False)
face_nn.passthrough.link(script.inputs["face_nn_passthrough"])
script.inputs["face_nn_passthrough"].setBlocking(False)

# OUTPUT SCRIPT to INPUT HEADPOSE_MANIP
script.outputs['headpose_cfg'].link(headpose_manip.inputConfig)
script.outputs['headpose_img'].link(headpose_manip.inputImage)

# OUTPUT SCRIPT TO INPUT LANDMARK_MANIP
script.outputs['landmark_cfg'].link(landmark_manip.inputConfig)
script.outputs['landmark_img'].link(landmark_manip.inputImage)

# OUTPUT LANDMARK TO SCRIPT
landmark_manip.out.link(land_nn.input)
land_nn.out.link(script.inputs['landmark_in'])
land_nn.passthrough.link(script.inputs['landmark_pass'])

# OUTPUT HEADPOSE TO SCRIPT
headpose_manip.out.link(pose_nn.input)
pose_nn.out.link(script.inputs['headpose_in'])
pose_nn.passthrough.link(script.inputs['headpose_pass'])

#=================[ LEFT EYE CROP ]=================

left_manip = pipeline.create(dai.node.ImageManip)
left_manip.initialConfig.setResize(128, 128)
left_manip.inputConfig.setWaitForMessage(True)
script.outputs['left_manip_img'].link(left_manip.inputImage)
script.outputs['left_manip_cfg'].link(left_manip.inputConfig)
left_manip.out.link(script.inputs['left_eye_in'])

#=================[ Right EYE CROP ]=================

right_manip = pipeline.create(dai.node.ImageManip)
right_manip.initialConfig.setResize(128, 128)
right_manip.inputConfig.setWaitForMessage(True)
script.outputs['right_manip_img'].link(right_manip.inputImage)
script.outputs['right_manip_cfg'].link(right_manip.inputConfig)
right_manip.out.link(script.inputs['right_eye_in'])


gaze_nn = pipeline.create(dai.node.NeuralNetwork)
gaze_nn.setBlobPath(blobconverter.from_zoo(
    name="gaze-estimation-adas-0002",
    shaves=6,
    version=openvino_version
))
script.outputs['to_gaze'].link(gaze_nn.input)

gaze_nn.out.link(script.inputs['gaze_nn_out'])
script.setScript("""
import traceback
import time
sync = {} #dict of messages

def find_in_dict(target_seq, name):
    # node.warn("find in dict")
    if str(target_seq) in sync:
        return sync[str(target_seq)][name]

def add_to_dict(det, seq, name):
    # node.warn("add to dict")
    sync[str(seq)][name] = det
    
def correct_bb(bb):
    if bb.xmin < 0: bb.xmin = 0.001
    if bb.ymin < 0: bb.ymin = 0.001
    if bb.xmax > 1: bb.xmax = 0.999
    if bb.ymax > 1: bb.ymax = 0.999

def check_gaze_est(seq):
    dict = sync[str(seq)]
    if "left" in dict and "right" in dict and "angles" in dict:
        # node.warn("GOT ALL 3")
        # 60x60x3 (x2 frames) + 6 (3 floats)
        gaze_data = NNData(21606)
        gaze_data.setSequenceNum(dict['left'].getSequenceNum())
        gaze_data.setLayer("left_eye_image", dict['left'].getData())
        gaze_data.setLayer("right_eye_image", dict['right'].getData())
        gaze_data.setLayer("head_pose_angles", dict['angles'])
        node.io['to_gaze'].send(gaze_data)
        # Clear this and all older results
        for sq in list(sync):
            # node.warn(f"removing seq {sq} (seq {seq})")
            del sync[sq]
            if str(seq) == str(sq):
                return

PAD = 0.05
PAD2x = PAD * 2

def get_eye_coords(x, y, det):
    xdelta = (det.xmax + det.xmin) / 2
    ydelta = (det.ymax + det.ymin) / 2
    xmin = x - PAD
    xmax = xmin + PAD2x
    ymin = y - PAD
    ymax = ymin + PAD2x
    xmin2 = det.xmin + xdelta * xmin
    xmax2 = det.xmin + xdelta * xmax
    ymin2 = det.ymin + ydelta * ymin
    ymax2 = det.ymin + ydelta * ymax
    ret = (xmin2, ymin2, xmax2, ymax2)
    # node.warn(f"Eye: {x}/{y}, Crop eyes: {ret}, det {det.xmin}, {det.ymin}, {det.xmax}, {det.ymax}")
    return ret

while True:
    time.sleep(0.001)
    try:
        preview = node.io['preview'].tryGet()
        if preview is not None:
            sync[str(preview.getSequenceNum())] = {"frame": preview}

        face_dets = node.io['face_nn_output'].tryGet()
        if face_dets is not None:
            # node.warn("face detections not none")
            passthrough = node.io['face_nn_passthrough'].tryGet()
            seq = passthrough.getSequenceNum()
            # No detections, carry on
            if len(face_dets.detections) == 0:
                del sync[str(seq)]
                continue
            # node.warn(f"New detection {seq}")
            if len(sync) == 0:
                # node.warn("nothing in sync")
                continue
            img = find_in_dict(seq, "frame")
            if img is None:
                # node.warn("img none")
                continue
            add_to_dict(face_dets.detections[0], seq, "detections")

            for det in face_dets.detections:
                # node.warn("det")
                correct_bb(det)
                # node.warn(str(det))
                # To the head pose estimation model
                cfg1 = ImageManipConfig()
                cfg1.setCropRect(det.xmin, det.ymin, det.xmax, det.ymax)
                cfg1.setResize(60, 60)
                cfg1.setKeepAspectRatio(False)
                # node.warn("sending headpose data")
                node.io['headpose_cfg'].send(cfg1)
                node.io['headpose_img'].send(img)

                # To the face landmark detection model
                cfg2 = ImageManipConfig()
                cfg2.setCropRect(det.xmin, det.ymin, det.xmax, det.ymax)
                cfg2.setResize(48, 48)
                cfg2.setKeepAspectRatio(False)
                # node.warn("sending landmark data")
                node.io['landmark_cfg'].send(cfg2)
                node.io['landmark_img'].send(img)
                break # Only one face at a time is currently supported

        # Head pose processing
        headpose = node.io['headpose_in'].tryGet()
        if headpose is not None:
            # node.warn("headpose")
            passthrough = node.io['headpose_pass'].tryGet()
            seq = passthrough.getSequenceNum()
            # Face rotation in degrees
            y = headpose.getLayerFp16('angle_y_fc')[0]
            p = headpose.getLayerFp16('angle_p_fc')[0]
            r = headpose.getLayerFp16('angle_r_fc')[0]
            angles = [y, p, r]
            # node.warn(f"angles {angles}")
            add_to_dict(angles, seq, "angles")
            check_gaze_est(seq)

        # Landmark processing
        landmark_in = node.io['landmark_in'].tryGet()
        if landmark_in is not None:
            passthrough = node.io['landmark_pass'].tryGet()
            seq = passthrough.getSequenceNum()
            img = find_in_dict(seq, "frame")
            det = find_in_dict(seq, "detections")
            if img is None or det is None:
                continue
            landmarks = landmark_in.getFirstLayerFp16()

            # We need to crop the left and right eye out of the face frame
            left_cfg = ImageManipConfig()
            left_cfg.setCropRect(*get_eye_coords(landmarks[0], landmarks[1], det))
            left_cfg.setResize(60, 60)
            left_cfg.setKeepAspectRatio(False)
            node.io['left_manip_cfg'].send(left_cfg)
            node.io['left_manip_img'].send(img)

            right_cfg = ImageManipConfig()
            right_cfg.setCropRect(*get_eye_coords(landmarks[2], landmarks[3], det))
            right_cfg.setResize(60, 60)
            right_cfg.setKeepAspectRatio(False)
            node.io['right_manip_cfg'].send(right_cfg)
            node.io['right_manip_img'].send(img)

        left_eye = node.io['left_eye_in'].tryGet()
        if left_eye is not None:
            # node.warn("LEFT EYE GOT")
            seq = left_eye.getSequenceNum()
            add_to_dict(left_eye, seq, "left")
            check_gaze_est(seq)

        right_eye = node.io['right_eye_in'].tryGet()
        if right_eye is not None:
            # node.warn("RIGHT EYE GOT")
            seq = right_eye.getSequenceNum()
            add_to_dict(right_eye, seq, "right")
            check_gaze_est(seq)

        gaze_output = node.io['gaze_nn_out'].tryGet()
        if gaze_output is not None:
            node.warn("Gaze output acquired")
            continue
    except:
        continue
""")

# bootloader = dai.DeviceBootloader("169.254.1.222", True)
# print("Connected")
# progress = lambda p: print(f'Flashing progress: {p*100:.1f}%')
# bootloader.flash(progress, pipeline)

with dai.Device(pipeline, dai.DeviceInfo("169.254.1.222")) as device:
    print("Connected")
    while True:
        time.sleep(0.01)
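
One more observation: correct_bb() clamps the face bbox, but get_eye_coords() does not clamp its result, so a face near the frame border can request an eye crop outside the [0, 1] range, which might explain the ImageManip warp errors. A clamped variant (an untested sketch; clamp01 and get_eye_coords_clamped are made-up names) would be:

def clamp01(v):
    # Keep normalized coordinates strictly inside the frame,
    # mirroring the 0.001/0.999 limits correct_bb() already uses
    return min(max(v, 0.001), 0.999)

def get_eye_coords_clamped(x, y, det):
    xmin, ymin, xmax, ymax = get_eye_coords(x, y, det)
    return (clamp01(xmin), clamp01(ymin), clamp01(xmax), clamp01(ymax))

The two get_eye_coords(...) calls in the script would then become get_eye_coords_clamped(...).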
    19 days later

    Hello Erik,

    After trying a lot of different things, I am still getting the same error.
    The specific error is:

    [1944301001F1201300] [169.254.1.222] [31.011] [system] [critical] Fatal error. Please report to developers. Log: 'Fatal error on MSS CPU: trap: 2A, address: 800A7770' '0'

    The error occurs when I move further away from the camera.
    I am trying to get age-gender-recognition-retail-0013 to run standalone on an OAK-D PoE.

    The minimal code that reproduces the error is:

    import blobconverter
    import depthai as dai
    import time
    
    # Create pipeline
    pipeline = dai.Pipeline()
    
    # Define sources and outputs
    camRgb = pipeline.create(dai.node.ColorCamera)
    faceDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
    monoLeft = pipeline.create(dai.node.MonoCamera)
    monoRight = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)
    objectTracker = pipeline.create(dai.node.ObjectTracker)
    ageGender = pipeline.create(dai.node.NeuralNetwork)
    # Properties
    camRgb.setPreviewSize(300, 300)
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    camRgb.setInterleaved(False)
    camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
    
    monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
    monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    
    stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
    stereo.setFocalLength(400)
    faceDetectionNetwork.setBlobPath(blobconverter.from_zoo(name='face-detection-retail-0004', shaves=5))
    faceDetectionNetwork.setConfidenceThreshold(0.8)
    faceDetectionNetwork.input.setBlocking(False)
    faceDetectionNetwork.setDepthLowerThreshold(100)
    faceDetectionNetwork.setDepthUpperThreshold(10000)
    
    ageGender.setBlobPath((blobconverter.from_zoo(name='age-gender-recognition-retail-0013', shaves=5)))
    
    objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID)
    objectTracker.inputTrackerFrame.setBlocking(False)
    objectTracker.inputTrackerFrame.setQueueSize(2)
    
    # Setup script
    script = pipeline.create(dai.node.Script)
    script.setProcessor(dai.ProcessorType.LEON_CSS)
    
    # Linking
    monoLeft.out.link(stereo.left)
    monoRight.out.link(stereo.right)
    
    camRgb.preview.link(faceDetectionNetwork.input)
    stereo.depth.link(faceDetectionNetwork.inputDepth)
    
    faceDetectionNetwork.out.link(script.inputs['nn_output'])
    script.inputs['nn_output'].setBlocking(False)
    script.inputs['nn_output'].setQueueSize(1)
    
    camRgb.preview.link(objectTracker.inputTrackerFrame)
    
    faceDetectionNetwork.passthrough.link(objectTracker.inputDetectionFrame)
    faceDetectionNetwork.out.link(objectTracker.inputDetections)
    faceDetectionNetwork.out.link(script.inputs['face_det'])
    objectTracker.passthroughDetectionFrame.link(script.inputs['detFrame'])
    objectTracker.out.link(script.inputs['tracklets'])
    script.inputs['face_det'].setBlocking(False)
    script.inputs['face_det'].setQueueSize(1)
    script.inputs['detFrame'].setBlocking(False)
    script.inputs['detFrame'].setQueueSize(1)
    script.inputs['tracklets'].setBlocking(False)
    script.inputs['tracklets'].setQueueSize(1)
    
    script.setScript("""
    import socket
    import time
    
    old_age = "0"
    old_gender= "man"
    while True:
        try:
            trackletsQueue = node.io['tracklets'].tryGet()
            frame = node.io['detFrame'].tryGet()
            if trackletsQueue is not None and frame is not None:
                tracklets = trackletsQueue.tracklets
                for t in tracklets:
                    z = t.spatialCoordinates.z
                    roi = t.roi.denormalize(300, 300)
                    x1 = int(roi.topLeft().x)
                    y1 = int(roi.topLeft().y)
                    x2 = int(roi.bottomRight().x)
                    y2 = int(roi.bottomRight().y)
                    image_manip_config = ImageManipConfig()
                    # Shrink or grow the ROI to a centered 62x62 crop
                    if x2-x1 > 62:
                        diff = (x2-x1-62) // 2
                        x1 = x1+diff
                        x2 = x2-diff
                    elif x2-x1 < 62:
                        x2 = x1+62
                    if y2-y1 > 62:
                        diff = (y2-y1-62) // 2
                        y1 = y1+diff
                        y2 = y2-diff
                    elif y2-y1 < 62:
                        y2 = y1+62
                    if x1 < 0:
                        x1 = 0
                        x2 = 61
                    if x2 > 300:
                        x1 = 300-62
                        x2 = 300
                    if y1 <0:
                        y1 = 0
                        y2 = 61
                    if y2 > 300:
                        y2 = 300
                        y1 = 300-62
                    image_manip_config.setCropRect(x1/300, y1/300, x2/300, y2/300)
                    image_manip_config.setResize(62, 62)
                    node.io['manipconfig'].send(image_manip_config)
                    node.io['frame'].send(frame)
                    time.sleep(0.2)
            time.sleep(1)
        except:
            continue
    """)
    
    crop_manip = pipeline.create(dai.node.ImageManip)
    crop_manip.initialConfig.setResize(62, 62)
    script.outputs['manipconfig'].link(crop_manip.inputConfig)
    script.outputs['frame'].link(crop_manip.inputImage)
    crop_manip.out.link(ageGender.input)
    script.inputs["manip_in"].setBlocking(False)
    script.inputs["manip_in"].setQueueSize(1)
    
    with dai.Device(pipeline, dai.DeviceInfo("169.254.1.222")) as device:
        print("Connected")
        while True:
            time.sleep(0.01)
    

      Hi Jonas_declercq,
      I have been running it for a few minutes and it looks good on my end. My guess is that the NN sometimes produces a value from which the Script node builds an invalid ImageManipConfig, which then crashes the device. I'd suggest printing out the ImageManip configs before sending them, so you can determine which config causes the crash.
      Thanks, Erik
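
      For example, something along these lines inside the Script node, right before the config is sent (a sketch reusing the x1/y1/x2/y2 and image_manip_config values your script already computes; the skip-on-invalid guard is my addition):

      # Sketch: log and sanity-check the crop rect before handing it to ImageManip
      xmin, ymin, xmax, ymax = x1/300, y1/300, x2/300, y2/300
      node.warn(f"Crop rect: {xmin:.3f},{ymin:.3f} -> {xmax:.3f},{ymax:.3f}")
      if not (0 <= xmin < xmax <= 1 and 0 <= ymin < ymax <= 1):
          node.warn("Skipping invalid crop rect")
          continue  # still inside the 'for t in tracklets:' loop
      image_manip_config.setCropRect(xmin, ymin, xmax, ymax)
      image_manip_config.setResize(62, 62)
      node.io['manipconfig'].send(image_manip_config)

      If the crashes stop once invalid rects are skipped, you have found the culprit.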