As promised, here is my solution.
Run the commands below:
sudo apt-get update
sudo apt-get install v4l2loopback-dkms
sudo apt-get install gstreamer1.0-tools gstreamer1.0-plugins-base gstreamer1.0-plugins-good
Next we set up the virtual loopback device; in the command below I create /dev/video10.
sudo modprobe v4l2loopback devices=1 video_nr=10 card_label="VirtCam" exclusive_caps=1 max_buffers=2
ls /dev/video*
Next, run a quick test using Cheese (installed by default on the Jetson): run the command below, then open Cheese on /dev/video10 and check that you see the color bar.
gst-launch-1.0 videotestsrc ! videoconvert ! v4l2sink device=/dev/video10
Once all this is working, you can proceed to the simple example below. Note: you should have the DepthAI base installed as described on the official site, and run it inside a virtual environment.
#!/usr/bin/env python3
"""
Filename: dual_camera.py
Author: Robert Carroll
Email: —---------------------
Description: This script streams from two Oak D cameras with IP addresses 192.168.1.111 and 192.168.1.222.
Creation Date: 06-10-2023
Last Modified: 07-10-2023
License: N/A
"""
import cv2
import depthai as dai
import subprocess
# Constants for video dimensions
WIDTH = 1280
HEIGHT = 800
def create_pipeline(ip):
    """Build a DepthAI pipeline that streams the color camera of the device at *ip*.

    Args:
        ip: IP address of the OAK camera (used both to locate the device
            and to derive a unique output stream name).

    Returns:
        A ``(pipeline, device_info, stream_name)`` tuple ready to be passed
        to ``dai.Device`` and ``getOutputQueue``.
    """
    stream_name = f"video_{ip}"
    pipeline = dai.Pipeline()

    # Configure the color camera node: 800p sensor, cropped/scaled to the
    # fixed WIDTH x HEIGHT output, RGB channel order.
    camera = pipeline.create(dai.node.ColorCamera)
    camera.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    camera.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P)
    camera.setVideoSize(WIDTH, HEIGHT)
    camera.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)

    # Expose the video output over XLink under a per-device stream name.
    link_out = pipeline.create(dai.node.XLinkOut)
    link_out.setStreamName(stream_name)
    camera.video.link(link_out.input)

    return pipeline, dai.DeviceInfo(ip), stream_name
def main():
    """Stream frames from two OAK-D cameras into the v4l2 loopback device.

    Alternates the source fed to FFmpeg: each 200-frame cycle sends the
    first 101 frames from camera 2 and the remaining 99 from camera 1
    (`count_images` resets to 0 after reaching 200).

    The FFmpeg child process is always shut down (stdin closed, process
    reaped) even if a camera error or KeyboardInterrupt occurs.
    """
    pipeline1, device_info1, stream_name1 = create_pipeline("192.168.1.111")
    pipeline2, device_info2, stream_name2 = create_pipeline("192.168.1.222")

    # FFmpeg reads raw frames from stdin and forwards them to the loopback
    # device created earlier with v4l2loopback.
    # NOTE(review): the input is declared bgr24 while the cameras are
    # configured with RGB color order — confirm the channel order of
    # getCvFrame() output matches, otherwise red/blue will be swapped.
    ffmpeg_cmd = [
        "ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo",
        "-pix_fmt", "bgr24", "-s", f"{WIDTH}x{HEIGHT}", "-r", "30",
        "-i", "-", "-vcodec", "rawvideo", "-pix_fmt", "yuv420p",
        "-f", "v4l2", "/dev/video10"
    ]
    ffmpeg_process = subprocess.Popen(ffmpeg_cmd, stdin=subprocess.PIPE)

    try:
        # Connect to both devices; the context managers close them on exit.
        with dai.Device(pipeline1, device_info1) as device1, \
             dai.Device(pipeline2, device_info2) as device2:
            # maxSize=1, blocking=False: always keep only the newest frame.
            video_queue1 = device1.getOutputQueue(name=stream_name1, maxSize=1, blocking=False)
            video_queue2 = device2.getOutputQueue(name=stream_name2, maxSize=1, blocking=False)

            count_images = 0
            while True:
                video_in1 = video_queue1.get()
                video_in2 = video_queue2.get()

                # Pick which camera's frame to forward for this iteration.
                frame = (video_in1 if count_images > 100 else video_in2).getCvFrame()
                try:
                    ffmpeg_process.stdin.write(frame.tobytes())
                except BrokenPipeError:
                    # FFmpeg exited (e.g. the loopback device went away);
                    # stop streaming instead of crashing.
                    break

                count_images += 1
                if count_images >= 200:
                    count_images = 0

                # Add frame processing or display logic as needed here.
                # NOTE(review): cv2.waitKey only delivers key events while an
                # OpenCV window (cv2.imshow) exists; without one this loop can
                # only be stopped externally — confirm intended exit path.
                if cv2.waitKey(1) == ord('q'):
                    break
    finally:
        # Always reap the FFmpeg child: close its stdin so it sees EOF,
        # then wait so it does not linger as a zombie process.
        if ffmpeg_process.stdin is not None:
            ffmpeg_process.stdin.close()
        ffmpeg_process.wait()
        # Clean up any OpenCV windows.
        cv2.destroyAllWindows()
# Run the dual-camera streamer only when executed as a script.
if __name__ == "__main__":
    main()