Hello,
Thank You for your fast response.
I have tried to apply the recommndation that you gave to the code but I am getting this error:

Also this is my current code that I modifed from the POE-TCP-Streaming. I tried to edit the oak.py into accpeting the .tar.xz. Also the tar.xz file that I have does work fine. Although the device isnt using any other application I am getting this error message.
'#!/usr/bin/env python3
from pathlib import Path
import depthai as dai
from utils.annotation_node import AnnotationNode
from utils.oak_arguments import initialize_argparser
from utils.scripts import get_client_script, get_server_script
_, args = initialize_argparser()
device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device()
platform = device.getPlatform().name
MODEL_PATH = "/app/utils/yolov6n-r2-288x512.rvc4.tar.xz"
nn_archive = dai.NNArchive(MODEL_PATH)
W, H = nn_archive.getInputWidth(), nn_archive.getInputHeight()
CLASSES = nn_archive.getConfig().model.heads[0].metadata.classes
pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.ColorCamera)
cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setPreviewSize(W, H)
cam.setInterleaved(False)
cam.setFps(args.fps_limit)
left = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
right = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)
stereo = pipeline.create(dai.node.StereoDepth).build(
left=left.requestOutput((W, H)),
right=right.requestOutput((W, H)),
presetMode=dai.node.StereoDepth.PresetMode.HIGH_DETAIL
)
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
stereo.setLeftRightCheck(True)
stereo.setRectification(True)
if platform == "RVC2":
stereo.setOutputSize(W, H)
nn = pipeline.create(dai.node.SpatialDetectionNetwork).build(
input = cam, # Camera node itself
stereo = stereo, # Or None if you only want 2D
nnArchive = nn_archive,
fps = args.fps_limit,
)
nn.setConfidenceThreshold(0.5)
nn.setBoundingBoxScaleFactor(0.7)
annot = pipeline.create(AnnotationNode).build(
input_detections = nn.out,
labels = CLASSES,
)
nn.passthrough.link(annot.inputs["frame"])
video_enc = pipeline.create(dai.node.VideoEncoder)
video_enc.setDefaultProfilePreset(args.fps_limit, dai.VideoEncoderProperties.Profile.MJPEG)
annot.out_annotations.link(video_enc.input)
script = pipeline.create(dai.node.Script)
script.setProcessor(dai.ProcessorType.LEON_CSS)
video_enc.bitstream.link(script.inputs["frame"])
nn.out.link (script.inputs["detection"])
for port in ("frame", "detection"):
script.inputs[port].setBlocking(False)
script.inputs[port].setMaxSize(1)
if args.mode.lower() == "client":
script.setScript(get_client_script(args.address))
else:
script.setScript(get_server_script())
script.outputs["control"].link(cam.inputControl)
with dai.Device(pipeline) as dev:
print("Pipeline started on PoE TCP with custom YOLO archive.")
dev.waitUntilPipelineRunning()
# The host.py client will connect on port 5000 and parse:
# 32-byte header + detection string + JPEG bytes
while True:
pass
'
`