I want to send the depth output of the StereoDepth node as a gray image stream via TCP to another PC for further post-processing.
import depthai as dai
import time
# Start defining a pipeline
pipeline = dai.Pipeline()
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
depth = pipeline.create(dai.node.StereoDepth)
# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
extended_disparity = False
# Better accuracy for longer distance, fractional disparity 32-levels:
subpixel = False
# Better handling for occlusions:
lr_check = False
# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
depth.setLeftRightCheck(lr_check)
depth.setExtendedDisparity(extended_disparity)
depth.setSubpixel(subpixel)
# Linking
monoLeft.out.link(depth.left)
monoRight.out.link(depth.right)
videoEnc3 = pipeline.create(dai.node.VideoEncoder)
videoEnc3.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.MJPEG)
# depth.disparity.link(videoEnc3.input)
depth.depth.link(videoEnc3.input)
script3 = pipeline.create(dai.node.Script)
script3.setProcessor(dai.ProcessorType.LEON_CSS)
videoEnc3.bitstream.link(script3.inputs['frame'])
script3.inputs['frame'].setBlocking(False)
script3.inputs['frame'].setQueueSize(1)
script3.setScript("""
import socket
import time
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("0.0.0.0", 5000))
server.listen()
node.warn("Server up")
while True:
    conn, client = server.accept()
    node.warn(f"Connected to client IP: {client}")
    try:
        while True:
            pck = node.io["frame"].get()
            data = pck.getData()
            ts = pck.getTimestamp()
            # 6-byte magic + 18-byte timestamp + 8-byte length = 32-byte header
            header = f"ABCDE " + str(ts.total_seconds()).ljust(18) + str(len(data)).ljust(8)
            # node.warn(f'>{header}<')
            conn.send(bytes(header, encoding='ascii'))
            conn.send(data)
    except Exception as e:
        node.warn("Client disconnected")
""")
with dai.Device(pipeline) as device:
    print("Connected")
    while True:
        time.sleep(1)
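On the receiving PC I plan to parse the 32-byte header (6-byte magic, 18-byte timestamp, 8-byte length) in front of each frame. A minimal receiver sketch (the device IP is a placeholder, and the cv2 decode step is only indicated as a comment):

import socket

def recv_exact(sock, n):
    # TCP may deliver partial chunks, so loop until exactly n bytes arrived
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("Stream closed")
        buf += chunk
    return buf

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("192.168.1.100", 5000))  # device IP: placeholder

while True:
    header = recv_exact(sock, 32)                 # "ABCDE " + timestamp + length
    ts = float(header[6:24].decode().strip())     # seconds since device boot
    length = int(header[24:32].decode().strip())  # payload size in bytes
    payload = recv_exact(sock, length)            # one MJPEG-encoded frame
    # e.g. cv2.imdecode(np.frombuffer(payload, np.uint8), cv2.IMREAD_GRAYSCALE)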
The problem is that the depth output is encoded with 16 bits per pixel (see the StereoDepth docs), but the VideoEncoder, which I need to compress the stream before sending it over TCP, only accepts NV12 or GRAY8 input, i.e. 8 bits per channel.
When I run the code, I get this error:
[VideoEncoder(3)] [warning] Arrived frame type (14) is not either NV12 or YUV400p (8-bit Gray)
I am able to link the disparity output to the video encoder (depth.disparity.link(videoEnc3.input)), since each disparity pixel is also encoded with only 8 bits.
My current idea is to receive the disparity on the host and manually convert it back to a depth image. Unfortunately, this does not work when subpixel mode is enabled, because the fractional disparity bits no longer fit into 8 bits, and subpixel would be important for my application.
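For reference, the conversion I have in mind is depth = focal_length_px * baseline / disparity. A minimal sketch with NumPy (the focal length and baseline values below are placeholders; on a real device they should come from device.readCalibration()):

import numpy as np

def disparity_to_depth(disp_u8, focal_px, baseline_m):
    # depth [m] = focal [px] * baseline [m] / disparity [px]
    disp = disp_u8.astype(np.float32)
    depth = np.zeros_like(disp)
    valid = disp > 0  # disparity 0 means "no measurement"
    depth[valid] = focal_px * baseline_m / disp[valid]
    return depth

# Dummy 720p disparity frame just to make the sketch runnable:
disp_frame = np.random.randint(1, 96, (720, 1280), dtype=np.uint8)
# Placeholder intrinsics; read the real ones via device.readCalibration():
depth_m = disparity_to_depth(disp_frame, focal_px=800.0, baseline_m=0.075)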
Does anyone have an idea how I can solve this problem?
- Is there a way to send the data in a different format over TCP? (See the sketch after this list.)
- Is there a way to compress the depth image to uint8 before linking it to the encoder?
- Is there a way to separate the depth data into different color channels?
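Regarding the first bullet: since the Script node just sends raw bytes, one option I am considering is to skip the VideoEncoder entirely and link depth.depth straight into the Script node, so the full 16-bit frames go out uncompressed (roughly 1280 x 720 x 2 bytes each). A sketch of the changed linking, reusing the pipeline above (untested idea, same header protocol):

# Replace the encoder path...
#   depth.depth.link(videoEnc3.input)
#   videoEnc3.bitstream.link(script3.inputs['frame'])
# ...with a direct link of the raw 16-bit depth frames:
depth.depth.link(script3.inputs['frame'])

# pck.getData() in the script then yields the raw little-endian uint16 buffer;
# on the receiving PC it can be restored with:
#   np.frombuffer(payload, dtype=np.uint16).reshape(720, 1280)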
Thank you in advance