I've been attempting to set up my OAK-D LR as a standalone device and have successfully been able to pull the camera feeds with the standalone example found in luxonis/depthai-experiments, under gen2-cumulative-object-counting/standalone.

However, when attempting to modify this script to pull the depth maps off the camera, I am getting a "connection refused" error. I am able to ping my camera, so I am assuming it is failing to run the script and bind to the socket. The code I am attempting to use is shown below — I am on DepthAI version 2.24.0.0.

#!/usr/bin/env python3
"""Flash a standalone OAK-D LR pipeline that streams MJPEG-encoded disparity
maps from three stereo pairs (LC, LR, CR) over a TCP socket (port 8000)
served by an on-device Script node."""

import depthai as dai

pipeline = dai.Pipeline()

# Stereo settings
extended_disparity = True
# Subpixel disparity produces 16-bit output, which the VideoEncoder cannot
# consume — keep it disabled while disparity is piped into an encoder.
subpixel = False
lr_check = True

left = pipeline.create(dai.node.ColorCamera)
center = pipeline.create(dai.node.ColorCamera)
right = pipeline.create(dai.node.ColorCamera)

LC_depth = pipeline.create(dai.node.StereoDepth)
LR_depth = pipeline.create(dai.node.StereoDepth)
CR_depth = pipeline.create(dai.node.StereoDepth)

# Configure the three 1200P cameras, ISP-downscaled by 2/3.
for cam, socket_name in ((left, "left"), (center, "center"), (right, "right")):
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
    cam.setCamera(socket_name)
    cam.setIspScale(2, 3)

for depth_node in (LC_depth, LR_depth, CR_depth):
    depth_node.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    depth_node.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF)
    depth_node.setLeftRightCheck(lr_check)
    depth_node.setExtendedDisparity(extended_disparity)
    depth_node.setSubpixel(subpixel)

# Link ISP outputs -> StereoDepth inputs for each stereo pair.
left.isp.link(LC_depth.left)
center.isp.link(LC_depth.right)
left.isp.link(LR_depth.left)
right.isp.link(LR_depth.right)
center.isp.link(CR_depth.left)
right.isp.link(CR_depth.right)

# MJPEG-encode each disparity stream before shipping it over TCP.
disp_enc_LC = pipeline.create(dai.node.VideoEncoder)
disp_enc_LR = pipeline.create(dai.node.VideoEncoder)
disp_enc_CR = pipeline.create(dai.node.VideoEncoder)
for enc in (disp_enc_LC, disp_enc_LR, disp_enc_CR):
    enc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.MJPEG)

LC_depth.disparity.link(disp_enc_LC.input)
LR_depth.disparity.link(disp_enc_LR.input)
CR_depth.disparity.link(disp_enc_CR.input)

script_node = pipeline.create(dai.node.Script)
# Run the script on the LEON_CSS core: only that core provides OS-level
# modules such as `socket`. Without this the script fails to start, the TCP
# server never binds, and the client sees "connection refused".
script_node.setProcessor(dai.ProcessorType.LEON_CSS)
script_node.setScript(r"""
import socket
import struct

# Immediately notify over the 'log' output.
node.warn("Standalone Script started!")

# Open a TCP server on port 8000 (bind to all interfaces).
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("0.0.0.0", 8000))
server_socket.listen(1)
node.warn("Listening on 0.0.0.0:8000…")
conn, addr = server_socket.accept()
node.warn(f"Host connected from {addr}")

while True:
    # Each packet: [1-byte stream ID][4-byte big-endian length][JPEG data]
    pkt_lc = node.io["encoded_LC_disp"].get()
    data_lc = pkt_lc.getData()  # raw JPEG bytes
    conn.sendall(b"\x00" + struct.pack(">I", len(data_lc)) + data_lc)

    pkt_lr = node.io["encoded_LR_disp"].get()
    data_lr = pkt_lr.getData()
    conn.sendall(b"\x01" + struct.pack(">I", len(data_lr)) + data_lr)

    pkt_cr = node.io["encoded_CR_disp"].get()
    data_cr = pkt_cr.getData()
    conn.sendall(b"\x02" + struct.pack(">I", len(data_cr)) + data_cr)
""")

# Link each encoder output -> Script node's input streams.
disp_enc_LC.bitstream.link(script_node.inputs["encoded_LC_disp"])
disp_enc_LR.bitstream.link(script_node.inputs["encoded_LR_disp"])
disp_enc_CR.bitstream.link(script_node.inputs["encoded_CR_disp"])

# The Script node's built-in 'log' output (where node.warn() goes).
xout_warning = pipeline.create(dai.node.XLinkOut)
xout_warning.setStreamName("script_warning")
script_node.outputs["log"].link(xout_warning.input)

if __name__ == "__main__":
    (found, bl_info) = dai.DeviceBootloader.getFirstAvailableDevice()
    bootloader = dai.DeviceBootloader(bl_info)

    def progress(p: float) -> None:
        print(f"Flashing progress: {p * 100:.1f}%")

    bootloader.flash(progress, pipeline)

    ken_hank
    If you use subpixel disparity, the output is 16-bit, which is unsupported by the VideoEncoder. I'd recommend first trying the app locally before running it in standalone mode. This error should be logged.

    Also, update to 2.30. 2.24 still has the issue where in standalone, the "node.warn()" will overflow the memory over time.

    Thanks,
    Jaka

      jakaskerl

      I was able to run the program when using my computer as the host with no errors. I've updated the script with the functionality to run it locally via a flag, i.e. `oak_standalone.py --mode host` or `--mode flash`. I am still, however, receiving "connection refused". The updated script is below:

      #!/usr/bin/env python3

      import argparse

      import cv2

      import depthai as dai

      import numpy as np

      import socket

      import struct

      import sys

      def build_pipeline(host_mode: bool):

      """
      
      Build a pipeline for either host-mode (XLinkOut disparity)
      
      or standalone flash-mode (Script-based TCP streaming).
      
      Returns:
      
        - pipeline (dai.Pipeline)
      
        - max_disp (int) if host_mode, else None
      
      """
      
      pipeline = dai.Pipeline()
      
      # Stereo settings (common)
      
      extended_disparity = True
      
      subpixel          = True
      
      lr_check          = True
      
      # Create ColorCameras (left, center, right)
      
      left   = pipeline.create(dai.node.ColorCamera)
      
      center = pipeline.create(dai.node.ColorCamera)
      
      right  = pipeline.create(dai.node.ColorCamera)
      
      left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
      
      left.setCamera("left")
      
      left.setIspScale(2, 3)
      
      center.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
      
      center.setCamera("center")
      
      center.setIspScale(2, 3)
      
      right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
      
      right.setCamera("right")
      
      right.setIspScale(2, 3)
      
      # Create StereoDepth nodes (LC, LR, CR)
      
      LC_depth = pipeline.create(dai.node.StereoDepth)
      
      LR_depth = pipeline.create(dai.node.StereoDepth)
      
      CR_depth = pipeline.create(dai.node.StereoDepth)
      
      for depth_node in (LC_depth, LR_depth, CR_depth):
      
          depth_node.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
      
          depth_node.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF)
      
          depth_node.setLeftRightCheck(lr_check)
      
          depth_node.setExtendedDisparity(extended_disparity)
      
          depth_node.setSubpixel(subpixel)
      
      # Link ISP outputs → StereoDepth inputs
      
      left.isp.link(LC_depth.left)
      
      center.isp.link(LC_depth.right)
      
      left.isp.link(LR_depth.left)
      
      right.isp.link(LR_depth.right)
      
      center.isp.link(CR_depth.left)
      
      right.isp.link(CR_depth.right)
      
      max_disp = None
      
      if host_mode:
      
          # Host-mode: use XLinkOut to send raw disparity frames
      
          xout_disp_LC = pipeline.create(dai.node.XLinkOut)
      
          xout_disp_LC.setStreamName("disp_LC")
      
          LC_depth.disparity.link(xout_disp_LC.input)
      
          xout_disp_LR = pipeline.create(dai.node.XLinkOut)
      
          xout_disp_LR.setStreamName("disp_LR")
      
          LR_depth.disparity.link(xout_disp_LR.input)
      
          xout_disp_CR = pipeline.create(dai.node.XLinkOut)
      
          xout_disp_CR.setStreamName("disp_CR")
      
          CR_depth.disparity.link(xout_disp_CR.input)
      
          # We can query max disparity directly from LC_depth
      
          max_disp = LC_depth.initialConfig.getMaxDisparity()
      
      else:
      
          # Standalone flash-mode: pull raw disparity via Script over TCP
      
          script_node = pipeline.create(dai.node.Script)
      
          LC_depth.disparity.link(script_node.inputs["disp_LC"])
      
          LR_depth.disparity.link(script_node.inputs["disp_LR"])
      
          CR_depth.disparity.link(script_node.inputs["disp_CR"])
      
          script_node.setScript(r"""

      import socket

      import struct

      node.warn("Standalone (raw) Script started!")

      # Bind TCP on port 8000 to all interfaces

      server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

      server_socket.bind(("0.0.0.0", 8000))

      server_socket.listen(1)

      node.warn("Listening on 0.0.0.0:8000…")

      conn, addr = server_socket.accept()

      node.warn(f"Host connected from {addr}")

      while True:

      # 1) Get LC disparity ImgFrame
      
      pkt_lc = node.io["disp_LC"].get()
      
      data_lc = pkt_lc.getData()
      
      conn.sendall(b"\\x00" + struct.pack(">I", len(data_lc)) + data_lc)
      
      # 2) Get LR disparity ImgFrame
      
      pkt_lr = node.io["disp_LR"].get()
      
      data_lr = pkt_lr.getData()
      
      conn.sendall(b"\\x01" + struct.pack(">I", len(data_lr)) + data_lr)
      
      # 3) Get CR disparity ImgFrame
      
      pkt_cr = node.io["disp_CR"].get()
      
      data_cr = pkt_cr.getData()
      
      conn.sendall(b"\\x02" + struct.pack(">I", len(data_cr)) + data_cr)

      """)

          xout_warning = pipeline.create(dai.node.XLinkOut)
      
          xout_warning.setStreamName("script_warning")
      
          script_node.outputs["log"].link(xout_warning.input)
      
      return pipeline, max_disp

      def run_host_mode():
          """Run the pipeline in host mode.

          Opens the three disparity XLink queues, normalizes each raw
          disparity frame to 8-bit, applies a JET colormap, and displays the
          three streams until the user presses 'q'.
          """
          pipeline, max_disp = build_pipeline(host_mode=True)

          def colorize(raw):
              # Scale raw disparity into 0..255 and apply a JET colormap.
              disp8 = (raw * (255.0 / max_disp)).astype(np.uint8)
              return cv2.applyColorMap(disp8, cv2.COLORMAP_JET)

          with dai.Device(pipeline) as device:
              queues = {
                  # name -> ("Disparity XX" window title uses the suffix)
                  name: device.getOutputQueue(name=name, maxSize=4, blocking=False)
                  for name in ("disp_LC", "disp_LR", "disp_CR")
              }
              print("Starting host-mode loop. Press ‘q’ in any window to exit.")
              while True:
                  for name, queue in queues.items():
                      frame = queue.get().getFrame()
                      cv2.imshow(f"Disparity {name[5:]}", colorize(frame))
                  if cv2.waitKey(1) == ord('q'):
                      break
              cv2.destroyAllWindows()

      def run_flash_mode():
          """Build the standalone pipeline and flash it to the first available device."""
          pipeline, _ = build_pipeline(host_mode=False)
          (found, bl_info) = dai.DeviceBootloader.getFirstAvailableDevice()
          bootloader = dai.DeviceBootloader(bl_info)

          def progress(p: float) -> None:
              print(f"Flashing progress: {p * 100:.1f}%")

          bootloader.flash(progress, pipeline)
          print("Flashing complete. Please power-cycle the device to run standalone.")

      def main():
          """Parse the --mode flag and dispatch to host or flash mode."""
          arg_parser = argparse.ArgumentParser(
              description="OAK Stereo Depth: Host or Flash Mode"
          )
          arg_parser.add_argument(
              "--mode",
              choices=["host", "flash"],
              default="host",
              help="Run in 'host' mode (live display on laptop) or 'flash' mode (standalone).",
          )
          parsed = arg_parser.parse_args()
          # Dispatch table keeps the mapping from mode to entry point explicit.
          {"host": run_host_mode, "flash": run_flash_mode}[parsed.mode]()

      # Script entry point: dunders were stripped by the forum formatting.
      if __name__ == "__main__":
          main()

        ken_hank
        Still not okay — the Script node is only created in standalone mode, not in host mode, so running in host mode can't reproduce the issue.
        I think you might be missing the script_node.setProcessor(dai.ProcessorType.LEON_CSS). I tested your code and I get a "no module named 'socket'" error — likely what is causing the issue on your side.

        Thanks,
        Jaka