I'd like to record synchronized video from two OAK-1 W PoE cameras. I need them synchronized because I will use them in real time later on, but I need recordings as samples to evaluate performance. I modified code from the depthai-experiments GitHub repo (gen2-syncing/host-multiple-OAK-sync.py).
The problem I encounter is that the frame rate is very low and unstable — I get an average of 15 fps for a (640, 360) frame. I'd like a stable 30 fps at this resolution (downscaled from 1080p), if possible.
My host systems are a Jetson AGX Orin and an Orin Nano (I've tried on both).
Here is my code if someone can help:
edit: I need to use OpenCV, since the frame format is used later for processing.
#!/usr/bin/env python3
import cv2
import math
import depthai as dai
import contextlib
import argparse
from datetime import timedelta
from datetime import datetime
from time import time_ns
import os
import csv
# CLI: -f/--fps sets the sensor FPS applied to every camera on every device.
parser = argparse.ArgumentParser(epilog='Press C to capture a set of frames.')
parser.add_argument('-f', '--fps', type=float, default=30,
                    help='Camera sensor FPS, applied to all cams')
args = parser.parse_args()

# Stream name -> board socket (the OAK-1 has a single color sensor on CAM_A).
cam_socket_opts = {
    'rgb' : dai.CameraBoardSocket.CAM_A
}
# Stream name -> sensor instance index.
# NOTE(review): this mapping is never read anywhere else in the script —
# confirm it can be removed.
cam_instance = {
    'rgb' : 0
}
def create_pipeline(cam_list):
    """Build a DepthAI pipeline with one XLinkOut stream per camera in cam_list.

    cam_list: iterable of stream names ('rgb', 'left', 'right').
    Returns the configured dai.Pipeline.

    BUG FIX vs. the original: an XLinkOut was created for every name in
    cam_list, but only 'rgb' ever got a camera linked to it.  On a stereo
    device the 'left'/'right' queues therefore never produced a frame, so
    check_sync() could never assemble a complete frame set.  Mono cameras are
    now created and linked, and unknown names are skipped instead of leaving
    a dangling output stream.
    """
    pipeline = dai.Pipeline()
    cam = {}
    xout = {}
    for c in cam_list:
        print(f'value of c is : {c}')
        if c == 'rgb':
            cam[c] = pipeline.create(dai.node.ColorCamera)
            cam[c].setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
            # 1/3 ISP downscale: 1920x1080 -> 640x360 (matches the cv2.VideoWriter size).
            # (The original comment claimed 1280x720, which is wrong for a 1/3 scale.)
            cam[c].setIspScale(1, 3)
            cam[c].setBoardSocket(cam_socket_opts[c])
            source_output = cam[c].isp
        elif c in ('left', 'right'):
            cam[c] = pipeline.create(dai.node.MonoCamera)
            cam[c].setBoardSocket(
                dai.CameraBoardSocket.CAM_B if c == 'left' else dai.CameraBoardSocket.CAM_C)
            source_output = cam[c].out
        else:
            # Don't create an output stream nobody will ever feed.
            print(f'unknown camera name {c!r}, skipping')
            continue
        cam[c].setFps(args.fps)
        xout[c] = pipeline.create(dai.node.XLinkOut)
        xout[c].setStreamName(c)
        source_output.link(xout[c].input)
    return pipeline
# https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack
# ExitStack keeps every opened dai.Device alive for the whole recording
# session and closes them all when the block exits.
with contextlib.ExitStack() as stack:
    device_infos = dai.Device.getAllAvailableDevices()
    if len(device_infos) == 0: raise RuntimeError("No devices found!")
    else: print("Found", len(device_infos), "devices")
    queues = []
    # One output directory per run, timestamped to the minute.
    path = f'videos/{datetime.now().strftime("%Y-%m-%d_%H-%M")}'
    os.makedirs(path)
    files = []
    csv_file = f'{path}/fps.csv'
    video_writers = {}  # MxId -> cv2.VideoWriter
    ts_p = {}           # MxId -> host timestamp (ns) of previous frame, for FPS measurement
    fps_data = {}       # MxId -> list of measured instantaneous FPS values
    for index, device_info in enumerate(device_infos):
        # Note: the pipeline isn't set here, as we don't know yet what device it is.
        # The extra arguments passed are required by the existing overload variants
        openvino_version = dai.OpenVINO.Version.VERSION_2021_4
        usb2_mode = False
        # device = stack.enter_context(dai.Device(openvino_version, device_info, usb2_mode))
        # NOTE(review): dai.UsbSpeed.HIGH requests USB2 speed; presumably this
        # is ignored for PoE devices, but on a USB link it would cap throughput
        # and could explain low FPS — confirm against the depthai docs.
        device = stack.enter_context(dai.Device(device_info, dai.UsbSpeed.HIGH))
        # More than one connected camera => assume a stereo device with left/right monos.
        stereo = 1 < len(device.getConnectedCameras())
        cam_list = {'rgb', 'left', 'right'} if stereo else {'rgb'}
        # Get a customized pipeline based on identified device type
        device.startPipeline(create_pipeline(cam_list))
        # Output queue will be used to get the rgb frames from the output defined above
        for cam in cam_list:
            queues.append({
                'queue': device.getOutputQueue(name=cam, maxSize=4, blocking=False),
                'msgs': [],  # Frame msgs buffered until a synced set is found
                'mx': device.getMxId(),
                'cam': cam
            })
        files.append(f'{path}/{index}.mp4')
        ts_p[device.getMxId()] = time_ns()
        fps_data[device.getMxId()] = []
        # NOTE(review): the writer FPS is hard-coded to 30 and the frame size
        # to 640x360; both must match args.fps and the pipeline's ISP output
        # size, otherwise writing fails silently or playback speed is wrong.
        # video_writers[device.getMxId()] = cv2.VideoWriter(files[index], cv2.VideoWriter_fourcc(*'mp4v'), 30, (640*3,360*3))
        video_writers[device.getMxId()] = cv2.VideoWriter(files[index], cv2.VideoWriter_fourcc(*'mp4v'), 30, (640,360))
        # video_writers[device.getMxId()] = cv2.VideoWriter(files[index], cv2.VideoWriter_fourcc(*'mp4v'), 30, (640//2,360//2))
        print(device.getMxId())

    def check_sync(queues, timestamp):
        """Return True when every queue holds a frame whose device timestamp is
        within half a frame period of `timestamp`; on success, drop the older
        frames so the matched frame sits at the head of each queue's 'msgs'.
        Returns False (and leaves the buffers untouched) otherwise."""
        matching_frames = []
        for q in queues:
            for i, msg in enumerate(q['msgs']):
                time_diff = abs(msg.getTimestamp() - timestamp)
                # So below 17ms @ 30 FPS => frames are in sync
                if time_diff <= timedelta(milliseconds=math.ceil(500 / args.fps)):
                    matching_frames.append(i)
                    break
        if len(matching_frames) == len(queues):
            # We have all frames synced. Remove the excess ones
            for i, q in enumerate(queues):
                q['msgs'] = q['msgs'][matching_frames[i]:]
            return True
        else:
            return False

    while True:
        # Non-blocking poll of every device queue.
        for q in queues:
            new_msg = q['queue'].tryGet()
            if new_msg is not None:
                q['msgs'].append(new_msg)
                if check_sync(queues, new_msg.getTimestamp()):
                    # A complete synced set is available: consume one frame per queue.
                    # NOTE(review): this inner loop reuses the name `q`,
                    # shadowing the outer loop variable — it works, since the
                    # outer iterator is independent, but it is confusing.
                    for q in queues:
                        frame = q['msgs'].pop(0).getCvFrame()
                        ts = time_ns()
                        # ADD TIME MEASUREMENT FOR FPS SHOWN IN THE CODE
                        # Instantaneous FPS from host-side inter-frame interval (ns -> Hz).
                        fps = 1000000000/(ts - ts_p[q['mx']])
                        # NOTE(review): the FPS overlay is burned into the frame
                        # BEFORE it is written, so it ends up in the recording;
                        # move it after write() if clean footage is needed.
                        cv2.putText(frame, f"fps: {round(fps,1)}", (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 1)
                        fps_data[q['mx']].append(fps)
                        ts_p[q['mx']] = ts
                        cv2.imshow(f"{q['cam']} - {q['mx']}", frame)
                        # print(frame.shape)
                        video_writers[q['mx']].write(frame)
        if cv2.waitKey(1) == ord('q'):
            # On quit: release every writer and dump the per-device FPS
            # samples as CSV columns (one column per MxId).
            l = []
            for key, value in video_writers.items():
                value.release()
                print(f'{key} && {value}')
                l.append(fps_data[key])
            # NOTE(review): zip truncates to the shortest FPS list, so a
            # device with fewer samples silently shortens every column.
            rows = zip(*l)
            # for i in range(len(l[0]):
            # rows.append
            with open(csv_file, 'w', newline='') as file:
                writer = csv.writer(file)
                writer.writerow(video_writers.keys())
                writer.writerows(rows)
            print(f'The data has been written to {csv_file}')
            break