My Code works in Windows but not in my pasberri pi 4b
RuntimeError: Communication exception - possible device error/misconfiguration. Original message 'Couldn't read data from stream: 'rgb' (X_LINK_ERROR)'
configPath = Path(r'C:\\Users\\gusta\\Desktop\\depthai-experiments\\gen2-yolo\\device-decoding\\5\\best.json')
with configPath.open() as f:
config = json.load(f)
nnConfig = config.get("nn_config", {})
\# parse input shape
if "input_size" in nnConfig:
W, H = tuple(map(int, nnConfig.get("input_size").split('x')))
\# extract metadata
metadata = nnConfig.get("NN_specific_metadata", {})
classes = metadata.get("classes", {})
coordinates = metadata.get("coordinates", {})
anchors = metadata.get("anchors", {})
anchorMasks = metadata.get("anchor_masks", {})
iouThreshold = metadata.get("iou_threshold", {})
confidenceThreshold = metadata.get("confidence_threshold", {})
print(metadata)
# parse labels nnMappings = config.get("mappings", {}) labels = nnMappings.get("labels", {})
# get model path nnPath = r'C:\\Users\\gusta\\Desktop\\depthai-experiments\\gen2-yolo\\device-decoding\\5\\best_openvino_2022.1_5shave.blob' syncNN = True # Create pipeline pipeline = dai.Pipeline() # Define sources and outputs camRgb = pipeline.create(dai.node.ColorCamera) detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork) xoutRgb = pipeline.create(dai.node.XLinkOut) nnOut = pipeline.create(dai.node.XLinkOut) camRgb.initialControl.setManualFocus(120) # 0..255 xoutRgb.setStreamName("rgb") nnOut.setStreamName("nn") # Properties camRgb.setPreviewSize(W, H) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_13_MP) camRgb.setInterleaved(False) camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
camRgb.setFps(40)
# Network specific settings detectionNetwork.setConfidenceThreshold(confidenceThreshold) detectionNetwork.setNumClasses(classes) detectionNetwork.setCoordinateSize(coordinates) detectionNetwork.setAnchors(anchors) detectionNetwork.setAnchorMasks(anchorMasks) detectionNetwork.setIouThreshold(iouThreshold) detectionNetwork.setBlobPath(nnPath) detectionNetwork.setNumInferenceThreads(2) detectionNetwork.input.setBlocking(False)
# Linking camRgb.preview.link(detectionNetwork.input) detectionNetwork.passthrough.link(xoutRgb.input) detectionNetwork.out.link(nnOut.input)
# Connect to device and start pipeline with dai.Device(pipeline) as device: # Output queues will be used to get the rgb frames and nn data from the outputs defined above qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
frame = None detections = [] counter = 0 loctime = time.time() location = util.get_system_location() lastdetection = None while True: if time.time() - loctime == 5: location = util.get_system_location() loctime = time.time() countfordetection = 0
inRgb = qRgb.get() inDet = qDet.get() if inRgb is not None: frame = inRgb.getCvFrame() # print("NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)))
if inDet is not None: detections = inDet.detections for detection in detections: if (detection.label == 0 and not frame is None): print("saved") if lastdetection is not None and util.are_images_similar(lastdetection, frame, .95): break lastdetection = frame processing_queue.put((frame, location, time.time())) break
counter += 1 cv2.imshow("frame", frame)
if cv2.waitKey(1) == ord('q'): break