I have a FFC + IMX582 that I am testing. My usecase is to save a full resolution still (preferably jpeg encoded) when a trigger command is sent and at the same time also read the output from a neural net that's running on the preview stream. Without the neural net in the equation the triggering code works fine and I can get the still frame without issue. When the neural net is active, however, the triggering code behaves inconsistently. Some times I can get a couple of triggers and then I can't get anymore, other times even the first trigger hangs (in that no data comes into the output queue).
The camera is still working, I can still retrieve the preview frames and the output of the neural net. I just can't seem to get at the still captures. I'm attaching my code below.
#!/usr/local/bin/python3
import cv2 # opencv - display the video stream
import depthai # depthai - access the camera and its data packets
import time
import numpy as np
dai = depthai
pipeline = depthai.Pipeline()
cam_rgb = pipeline.create(depthai.node.ColorCamera)
cam_rgb.setPreviewSize(640, 480)
cam_rgb.setInterleaved(False)
cam_rgb.setBoardSocket(depthai.CameraBoardSocket.CAM_A) # Same as CameraBoardSocket.RGB
# cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_5312X6000)
neural_network = pipeline.create(depthai.node.NeuralNetwork)
neural_network.setBlobPath("/code/pricetag_model_oakd.blob")
neural_network.input.setBlocking(False)
xout_nn = pipeline.create(depthai.node.XLinkOut)
xout_nn.setStreamName("nn_out")
neural_network.out.link(xout_nn.input)
# cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_4000X3000)
cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_5312X6000)
# Decrease pool sizes for all outputs (raw, isp, preview, video, still):
cam_rgb.setNumFramesPool(2, 1, 2, 1, 1)
cam_rgb.initialControl.setAutoFocusMode(depthai.RawCameraControl.AutoFocusMode.CONTINUOUS_VIDEO)
cam_rgb.initialControl.setAutoFocusTrigger()
# jpeg encoder settings, outputs EncodedFrame https://docs.luxonis.com/software/depthai-components/messages/encodedframe/
jpegEncoder = pipeline.create(dai.node.VideoEncoder)
jpegEncoder.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
jpegEncoder.setNumFramesPool(1)
# jpegEncoder.setLossless(True) # Lossless only for MJPEG
jpegEncoder.setQuality(99) # 0-100, 100 being the best quality (not lossless though)
xout_rgb = pipeline.create(depthai.node.XLinkOut)
xout_rgb.setStreamName("rgb")
cam_rgb.still.link(jpegEncoder.input)
jpegEncoder.bitstream.link(xout_rgb.input)
trigger_in = pipeline.create(depthai.node.XLinkIn)
trigger_in.setNumFrames(1)
trigger_in.setStreamName("trigger")
trigger_in.out.link(cam_rgb.inputControl)
cam_rgb.preview.link(neural_network.input)
xout_manip = pipeline.create(depthai.node.XLinkOut)
xout_manip.setStreamName("manip")
neural_network.out.link(xout_manip.input)
xout_pass = pipeline.create(depthai.node.XLinkOut)
xout_pass.setStreamName("pass")
neural_network.passthrough.link(xout_pass.input)
with depthai.Device(pipeline) as device:
device.setLogLevel(dai.LogLevel.DEBUG)
device.setLogOutputLevel(dai.LogLevel.DEBUG)
q_rgb = device.getOutputQueue("rgb", maxSize=1, blocking=False)
q_trigg = device.getInputQueue("trigger")
q_nn = device.getOutputQueue("nn_out", maxSize=1, blocking=False)
q_manip = device.getOutputQueue("manip", maxSize=1, blocking=False)
q_pass = device.getOutputQueue("pass", maxSize=1, blocking=False)
while True:
x = input("Do trigger?")
start = time.time()
ctrl = depthai.CameraControl()
ctrl.setCaptureStill(True)
print(f"Starting to send trigger request at {time.time() - start} seconds")
q_trigg.send(ctrl)
print(f"Sent trigger request at {time.time() - start} seconds")
in_rgb = q_pass.get() # this continues to work
# in_rgb = q_rgb.get() # this does not work when the neural net is active
if in_rgb is not None:
oot = low_res = q_manip.get()
out1 = (np.array(oot.getLayerFp16("model_1")).reshape((1, 480, 640, 1))[0, :, :, 0]*255).astype("uint8")
print(f"got in at {time.time() - start} seconds")
frame = in_rgb.getCvFrame()
cv2.imwrite("/code/tmp/im.png", frame)
cv2.imwrite("/code/tmp/map.png", out1)
print(f"got done at {time.time() - start} seconds")
else:
print("failed to get frame")