• DepthAI-v2
  • X_LINK_ERROR multiple times per minute with RPI4

Using a 5 V / 3 A power supply for the RPI4, and a USB-C 3.0 cable to connect to the Oak-1 camera.

The first few minutes of this running for the day does not crash at all, and then it begins to give X_LINK_ERROR every few seconds. It works fine when running on my mac, but not on RPI.

What can be causing this?

Console Logs:

First Image is on first initiation, second is while running.

Code:

from pathlib import Path
import sys
import os
import cv2
import depthai as dai
import numpy as np
import time
import argparse
import json
import blobconverter
import datetime


# Print count at set interval
def print_count(count, previous_time):
    """Print ``count`` at most once every TIME_INTERVAL seconds.

    Args:
        count: Value to print when the interval has elapsed.
        previous_time: Naive-UTC datetime of the last print (as produced by
            ``datetime.datetime.utcnow()`` elsewhere in this script).

    Returns:
        ``previous_time`` unchanged if the interval has not elapsed yet,
        otherwise the current timestamp (caller stores it for the next call).
    """
    # Print every X seconds
    TIME_INTERVAL = 60
    # Naive UTC on purpose: must stay comparable with the naive utcnow()
    # timestamps created by the caller.
    timestamp = datetime.datetime.utcnow()
    # BUG FIX: timedelta.seconds is only the seconds *component* and wraps at
    # day boundaries (1 day + 10 s reads as 10 s), silently suppressing the
    # print after long gaps. total_seconds() is the true elapsed time.
    if (timestamp - previous_time).total_seconds() < TIME_INTERVAL:
        # Interval time hasn't passed: hand the same previous_time back.
        return previous_time
    # Interval time passed: print and return the fresh timestamp.
    print(count)
    return timestamp

# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", help="Provide model name or model path for inference",
                    default='best_openvino_2022.1_6shave.blob', type=str)
parser.add_argument("-c", "--config", help="Provide config path for inference",
                    default='best_openvino_2022.1_6shave.json', type=str)
parser.add_argument('-roi', '--roi_position', type=float,
                    default=0.25, help='ROI Position (0-1)')
# BUG FIX: default=False combined with action='store_false' made -a a no-op
# (passing the flag stored the value it already had). store_true keeps the
# same default (False = x axis) while letting -a actually select the other
# axis.
parser.add_argument('-a', '--axis', default=False, action='store_true',
                    help='Axis for cumulative counting (default=x axis)')
args = parser.parse_args()

# parse config
configPath = Path(args.config)
if not configPath.exists():
    raise ValueError("Path {} does not exist!".format(configPath))

with configPath.open() as f:
    config = json.load(f)
nnConfig = config.get("nn_config", {})

# parse input shape ("WxH" string, e.g. "416x416")
if "input_size" in nnConfig:
    W, H = tuple(map(int, nnConfig.get("input_size").split('x')))
else:
    # BUG FIX: W/H were left undefined when input_size was missing, causing a
    # confusing NameError later at camRgb.setPreviewSize(W, H). Fail early
    # with a clear message instead.
    raise ValueError(
        "nn_config.input_size missing from {}".format(configPath))

# extract metadata (YOLO decoding parameters exported with the blob)
metadata = nnConfig.get("NN_specific_metadata", {})
classes = metadata.get("classes", {})
coordinates = metadata.get("coordinates", {})
anchors = metadata.get("anchors", {})
anchorMasks = metadata.get("anchor_masks", {})
iouThreshold = metadata.get("iou_threshold", {})
confidenceThreshold = metadata.get("confidence_threshold", {})

print(metadata)

# parse labels
nnMappings = config.get("mappings", {})
labels = nnMappings.get("labels", {})

# get model path; fall back to downloading the blob from the model zoo
nnPath = args.model
if not Path(nnPath).exists():
    print("No blob found at {}.".format(nnPath))
    nnPath = str(blobconverter.from_zoo(args.model, shaves = 6, zoo_type = "depthai", use_cache=True))
# sync outputs
syncNN = True

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs:
# camera -> YOLO detector -> object tracker, plus XLink streams to the host.
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
objectTracker = pipeline.create(dai.node.ObjectTracker)

# Host -> device stream used below to push a crop config at runtime.
configIn = pipeline.create(dai.node.XLinkIn)

# Device -> host streams: tracker passthrough frames and tracklet results.
xlinkOut = pipeline.create(dai.node.XLinkOut)
trackerOut = pipeline.create(dai.node.XLinkOut)

xlinkOut.setStreamName("preview")
trackerOut.setStreamName("tracklets")
configIn.setStreamName('config')


# Properties
# Preview size must match the NN input size parsed from the config (W x H).
camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
# NOTE(review): 35 FPS is demanding for an RPi4 host; if XLink errors
# persist, try lowering this — TODO confirm.
camRgb.setFps(35)
camRgb.setPreviewKeepAspectRatio(False)

# Network specific settings (YOLO decode parameters come from the JSON config)
detectionNetwork.setConfidenceThreshold(confidenceThreshold)
detectionNetwork.setNumClasses(classes)
detectionNetwork.setCoordinateSize(coordinates)
detectionNetwork.setAnchors(anchors)
detectionNetwork.setAnchorMasks(anchorMasks)
detectionNetwork.setIouThreshold(iouThreshold)
detectionNetwork.setBlobPath(nnPath)
detectionNetwork.setNumInferenceThreads(2)
# Non-blocking input: drop frames rather than stall the camera feed.
detectionNetwork.input.setBlocking(False)


# Track only detection label 0 (presumably "person" — verify against the
# model's label map).
objectTracker.setDetectionLabelsToTrack([0])  # track only person
# possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF
objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
# take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID
objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID)
objectTracker.setTrackerThreshold(0.5)

# Linking
camRgb.preview.link(detectionNetwork.input)
objectTracker.passthroughTrackerFrame.link(xlinkOut.input)

# Runtime crop configs from the host are fed into the camera node.
configIn.out.link(camRgb.inputConfig)
camRgb.preview.link(objectTracker.inputTrackerFrame)
#detectionNetwork.passthrough.link(objectTracker.inputTrackerFrame)
detectionNetwork.passthrough.link(objectTracker.inputDetectionFrame)
detectionNetwork.out.link(objectTracker.inputDetections)
objectTracker.out.link(trackerOut.input)

# from https://www.pyimagesearch.com/2018/08/13/opencv-people-counter/
class TrackableObject:
    """State kept per tracked object: its ID, centroid history, and a flag
    recording whether it has already been counted."""

    def __init__(self, objectID, centroid):
        # Identifier assigned by the tracker.
        self.objectID = objectID
        # Centroid history, seeded with the centroid seen at creation time.
        self.centroids = [centroid]
        # Set to True once the object has crossed the counting line.
        self.counted = False

# Set initial timestamp for print interval (naive UTC, matching print_count)
timestamp = datetime.datetime.utcnow()

# Restore the persisted count from counter.txt.
# BUG FIX: the original open(...).read() leaked the file handle and crashed
# with FileNotFoundError on first run; start from 0 when the file is missing
# or unreadable as an int.
try:
    objectCounter = int(Path("counter.txt").read_text())
except (FileNotFoundError, ValueError):
    objectCounter = 0

# Main acquisition loop. The outer while reconnects to the device from
# scratch whenever an XLink/runtime failure occurs.
while True:
    try:
        # Connect to device and start pipeline
        with dai.Device(pipeline) as device:

            # Small non-blocking queues: a slow host drops frames instead of
            # stalling the XLink connection.
            #preview = device.getOutputQueue("preview", 4, False)
            preview = device.getOutputQueue("preview", 1, False)
            tracklets = device.getOutputQueue("tracklets", 4, False)
            configQueue = device.getInputQueue("config")

            # Max cropX & cropY: fraction of the ISP frame not covered by the
            # video output, used as the crop rectangle extent below.
            maxCropX = (camRgb.getIspWidth() - camRgb.getVideoWidth()) / camRgb.getIspWidth()
            maxCropY = (camRgb.getIspHeight() - camRgb.getVideoHeight()) / camRgb.getIspHeight()
            print(maxCropX, maxCropY, camRgb.getIspWidth(), camRgb.getVideoHeight())

            # Default crop
            cropX = 0
            cropY = 0
            sendCamConfig = True

            if sendCamConfig:
                cfg = dai.ImageManipConfig()
                cfg.setCropRect(cropX, cropY, maxCropX, maxCropY)
                configQueue.send(cfg)
                print('crop - x: ', cropX, 'crop - y: ', cropY)

            startTime = time.monotonic()
            counter = 0
            fps = 0
            frame = None
            # Per-object state, keyed by tracklet id.
            trackableObjects = {}

            while True:
                try:
                    imgFrame = preview.get()
                    track = tracklets.get()
                    counter += 1
                    current_time = time.monotonic()
                    # Recompute FPS roughly once per second.
                    if (current_time - startTime) > 1:
                        fps = counter / (current_time - startTime)
                        counter = 0
                        startTime = current_time

                    color = (255, 0, 0)
                    frame = imgFrame.getCvFrame()
                    height = frame.shape[0]
                    width = frame.shape[1]

                    trackletsData = track.tracklets
                    for t in trackletsData:
                        # BUG FIX: finished tracklets were never purged, so
                        # trackableObjects grew without bound for the lifetime
                        # of the process (the TRACE output showed the tracklet
                        # list getting very long). Drop state for removed ids.
                        if t.status == dai.Tracklet.TrackingStatus.REMOVED:
                            trackableObjects.pop(t.id, None)
                            continue

                        to = trackableObjects.get(t.id, None)
                        # Centroid of the tracked bounding box in pixel space.
                        roi = t.roi.denormalize(width, height)
                        x1 = int(roi.topLeft().x)
                        y1 = int(roi.topLeft().y)
                        x2 = int(roi.bottomRight().x)
                        y2 = int(roi.bottomRight().y)
                        centroid = (int((x2 - x1) / 2 + x1), int((y2 - y1) / 2 + y1))

                        # If new tracklet, save its centroid
                        if t.status == dai.Tracklet.TrackingStatus.NEW:
                            to = TrackableObject(t.id, centroid)
                        elif to is not None:
                            if not to.counted:
                                y = [c[1] for c in to.centroids]
                                direction = centroid[1] - np.mean(y)
                                # Count when the centroid crosses the ROI line
                                # moving downward: now below the line, net
                                # motion downward, and history mean above it.
                                if centroid[1] > args.roi_position * height and direction > 0 and np.mean(y) < args.roi_position * height:
                                    objectCounter += 1
                                    if objectCounter > 999999:
                                        objectCounter = 0
                                    # BUG FIX: close the file deterministically
                                    # instead of leaking the handle.
                                    with open("counter.txt", "w") as cf:
                                        cf.write(str(objectCounter))
                                    to.counted = True

                            to.centroids.append(centroid)
                            # BUG FIX: cap the per-object centroid history —
                            # the direction check only needs recent motion and
                            # the list previously grew for as long as the
                            # object stayed tracked.
                            if len(to.centroids) > 50:
                                del to.centroids[:-50]

                        trackableObjects[t.id] = to

                    # Update timestamp and print if interval time passed
                    timestamp = print_count(objectCounter, timestamp)

                except RuntimeError as e:
                    # Typically an XLink read error: log it and reconnect.
                    print(datetime.datetime.utcnow())
                    print(f"Error reading data from depthAI device: {e}")
                    break  # Break out of loop and reinitialize
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
    # (making the script unstoppable with Ctrl-C) and hid every error
    # silently. Catch Exception, report it, and back off briefly so a dead
    # device is not hammered with reconnect attempts.
    except Exception as e:
        print(f"Device connection failed, retrying: {e}")
        time.sleep(1)
        continue

    Hi GilCohen
    Difficult to say, but it's probably a hardware issue. Could you swap out the usb cable? I found that sometimes when the connection is loose, just moving the cable can cause the issues you are experiencing.
    Perhaps also switch the RPI port?

    Thanks,
    Jaka

      jakaskerl

      I have used 2 different USB-C 3.0 cables (short, 2-3 feet) in different ports, 2 different RPI4s, and 2 different power supplies. It doesn't seem to be a hardware issue.

        Hi GilCohen
        Hmm, what depthai version are you using? Did you follow the installation guide for RPI?

        EDIT:
        How is resource usage on the PI? Perhaps the host gets stressed out and can't read the XLINK in time, causing the error.

        Thanks,
        Jaka

        depthai 2.22.0.0

        depthai-pipeline-graph 0.0.5

        depthai-sdk 1.13.1

        Python3 hovering around 15-30% CPU usage, nothing else using significant CPU processing.

        Start of run device usage, no crashing:

        After running for a few minutes, crashes start happening frequently:

        No changes in CPU usage.

        Thank you

          Hi GilCohen
          Could you run the script using DEPTHAI_LEVEL=DEBUG python3 ... or DEPTHAI_LEVEL=TRACE python3 ... to check for differences when running with the NN and when running without it. I suspected it could be a thermal issue, but considering it runs fine on the MacBook, this is likely not the case.

          Thanks,
          Jaka

            jakaskerl

            DEBUG:

            This was the first crash, which happens after a few minutes. After this has been running for a while, the crashes become more frequent and happen up to every 10 seconds.

            Unable to stop the trace in time to catch the output in the terminal, but it crashes just as often.

            jakaskerl

            From the TRACE, I see after a while the tracklets object list becomes very long. Is there something wrong in my code causing this buildup to happen and slow down my process?

            • erik replied to this.

              Hi GilCohen ,
              Seeing that you are using depthai 2.22, could you also try with 2.23? I believe there were some XLink changes that could affect stability.
              Thanks, Erik

                erik

                I updated to version 2.23.0.0, still having the same issues.

                pip3 list | grep depthai

                depthai                2.23.0.0

                depthai-pipeline-graph 0.0.5

                depthai-sdk            1.9.1.1