How should I modify the parameters of each camera individually in Cam_test.py?
So I can create 4 separate channels to control the 4 cameras individually? Is there a demo for that?
Hi @Elusive
No complete demo unfortunately, but it's basically a fusion of https://docs.luxonis.com/projects/api/en/latest/samples/ColorCamera/rgb_camera_control/ for each camera (a duplication of the queues for each camera).
One thing to note: the B and C sockets share the same I2C bus and are hardcoded to the same 3A settings (due to stereo), so running them with different settings won't work. The rest should work fine.
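Roughly, the duplication looks like this; below is a minimal, untested sketch for two cameras (the socket choice and stream names are just placeholders, and the same pattern extends to four):
import depthai as dai

pipeline = dai.Pipeline()

# One camera + one control input + one video output per socket,
# so every camera gets its own independently controllable queue
for idx, socket in enumerate([dai.CameraBoardSocket.CAM_A, dai.CameraBoardSocket.CAM_D], start=1):
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setBoardSocket(socket)

    ctrlIn = pipeline.create(dai.node.XLinkIn)
    ctrlIn.setStreamName(f'control{idx}')
    ctrlIn.out.link(cam.inputControl)

    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName(f'video{idx}')
    cam.video.link(xout.input)

with dai.Device(pipeline) as device:
    # Sending a control to 'control1' only affects the camera on CAM_A
    ctrl = dai.CameraControl()
    ctrl.setManualExposure(20000, 800)  # exposure time [us], ISO
    device.getInputQueue('control1').send(ctrl)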
Thanks,
Jaka
When I try to extend rgb_camera_control.py to control 4 cameras, I get an error with the following message: RuntimeError: ColorCamera(1) - Out of memory while creating pool for 'preview' frames. Number of frames: 4 each with size: 270000B
I haven't been able to resolve this yet. Could you take a look at my code below when you have time?
#!/usr/bin/env python3
*"""
This example shows usage of Camera Control message as well as ColorCamera configInput to change crop x and y
Uses 'WASD' controls to move the crop window, 'C' to capture a still image, 'T' to trigger autofocus, 'IOKL,.NM'
for manual exposure/focus/white-balance:
Control: key[dec/inc] min..max
exposure time: I O 1..33000 [us]
sensitivity iso: K L 100..1600
focus: , . 0..255 [far..near]
white balance: N M 1000..12000 (light color temperature K)
To go back to auto controls:
'E' - autoexposure
'F' - autofocus (continuous)
'B' - auto white-balance
Other controls:
'1' - AWB lock (true / false)
'2' - AE lock (true / false)
'3' - Select control: AWB mode
'4' - Select control: AE compensation
'5' - Select control: anti-banding/flicker mode
'6' - Select control: effect mode
'7' - Select control: brightness
'8' - Select control: contrast
'9' - Select control: saturation
'0' - Select control: sharpness
'[' - Select control: luma denoise
']' - Select control: chroma denoise
For the 'Select control: ...' options, use these keys to modify the value:
'-' or '_' to decrease
'+' or '=' to increase
'/' to toggle showing camera settings: exposure, ISO, lens position, color temperature
"""
import depthai as dai
import cv2
from itertools import cycle
# Step size ('W','A','S','D' controls)
STEP_SIZE = 8
# Manual exposure/focus/white-balance set step
EXP_STEP = 500 # us
ISO_STEP = 50
LENS_STEP = 3
WB_STEP = 200
def clamp(num, v0, v1):
    return max(v0, min(num, v1))
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
camRgb1 = pipeline.create(dai.node.ColorCamera)
camRgb2 = pipeline.create(dai.node.ColorCamera)
camRgb3 = pipeline.create(dai.node.ColorCamera)
camRgb4 = pipeline.create(dai.node.ColorCamera)
camRgb1.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb2.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb3.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb4.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb1.setIspScale(1,3)
camRgb2.setIspScale(1,3)
camRgb3.setIspScale(1,3)
camRgb4.setIspScale(1,3) # 1080P / 3 -> 640x360
camRgb1.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb2.setBoardSocket(dai.CameraBoardSocket.CAM_B)
camRgb3.setBoardSocket(dai.CameraBoardSocket.CAM_C)
camRgb4.setBoardSocket(dai.CameraBoardSocket.CAM_D)
stillEncoder1 = pipeline.create(dai.node.VideoEncoder)
stillEncoder2 = pipeline.create(dai.node.VideoEncoder)
stillEncoder3 = pipeline.create(dai.node.VideoEncoder)
stillEncoder4 = pipeline.create(dai.node.VideoEncoder)
camRgb1.setNumFramesPool(1, 1, 1, 1, 1)
camRgb2.setNumFramesPool(1, 1, 1, 1, 1)
camRgb3.setNumFramesPool(1, 1, 1, 1, 1)
camRgb4.setNumFramesPool(1, 1, 1, 1, 1)
controlIn1 = pipeline.create(dai.node.XLinkIn)
controlIn2 = pipeline.create(dai.node.XLinkIn)
controlIn3 = pipeline.create(dai.node.XLinkIn)
controlIn4 = pipeline.create(dai.node.XLinkIn)
configIn1 = pipeline.create(dai.node.XLinkIn)
configIn2 = pipeline.create(dai.node.XLinkIn)
configIn3 = pipeline.create(dai.node.XLinkIn)
configIn4 = pipeline.create(dai.node.XLinkIn)
ispOut1 = pipeline.create(dai.node.XLinkOut)
ispOut2 = pipeline.create(dai.node.XLinkOut)
ispOut3 = pipeline.create(dai.node.XLinkOut)
ispOut4 = pipeline.create(dai.node.XLinkOut)
videoOut1 = pipeline.create(dai.node.XLinkOut)
videoOut2 = pipeline.create(dai.node.XLinkOut)
videoOut3 = pipeline.create(dai.node.XLinkOut)
videoOut4 = pipeline.create(dai.node.XLinkOut)
stillMjpegOut1 = pipeline.create(dai.node.XLinkOut)
stillMjpegOut2 = pipeline.create(dai.node.XLinkOut)
stillMjpegOut3 = pipeline.create(dai.node.XLinkOut)
stillMjpegOut4 = pipeline.create(dai.node.XLinkOut)
controlIn1.setStreamName('control1')
controlIn2.setStreamName('control2')
controlIn3.setStreamName('control3')
controlIn4.setStreamName('control4')
configIn1.setStreamName('config1')
configIn2.setStreamName('config2')
configIn3.setStreamName('config3')
configIn4.setStreamName('config4')
ispOut1.setStreamName('isp1')
ispOut2.setStreamName('isp2')
ispOut3.setStreamName('isp3')
ispOut4.setStreamName('isp4')
videoOut1.setStreamName('video1')
videoOut2.setStreamName('video2')
videoOut3.setStreamName('video3')
videoOut4.setStreamName('video4')
stillMjpegOut1.setStreamName('still1')
stillMjpegOut2.setStreamName('still2')
stillMjpegOut3.setStreamName('still3')
stillMjpegOut4.setStreamName('still4')
# Properties
camRgb1.setVideoSize(640,360)
camRgb2.setVideoSize(640,360)
camRgb3.setVideoSize(640,360)
camRgb4.setVideoSize(640,360)
stillEncoder1.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
stillEncoder2.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
stillEncoder3.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
stillEncoder4.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
# Linking
camRgb1.isp.link(ispOut1.input)
camRgb2.isp.link(ispOut2.input)
camRgb3.isp.link(ispOut3.input)
camRgb4.isp.link(ispOut4.input)
camRgb1.video.link(videoOut1.input)
camRgb2.video.link(videoOut2.input)
camRgb3.video.link(videoOut3.input)
camRgb4.video.link(videoOut4.input)
controlIn1.out.link(camRgb1.inputControl)
controlIn2.out.link(camRgb2.inputControl)
controlIn3.out.link(camRgb3.inputControl)
controlIn4.out.link(camRgb4.inputControl)
configIn1.out.link(camRgb1.inputConfig)
configIn2.out.link(camRgb2.inputConfig)
configIn3.out.link(camRgb3.inputConfig)
configIn4.out.link(camRgb4.inputConfig)
camRgb1.still.link(stillEncoder1.input)
camRgb2.still.link(stillEncoder2.input)
camRgb3.still.link(stillEncoder3.input)
camRgb4.still.link(stillEncoder4.input)
stillEncoder1.bitstream.link(stillMjpegOut1.input)
stillEncoder2.bitstream.link(stillMjpegOut2.input)
stillEncoder3.bitstream.link(stillMjpegOut3.input)
stillEncoder4.bitstream.link(stillMjpegOut4.input)
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Get data queues
    controlQueue1 = device.getInputQueue('control1')
    controlQueue2 = device.getInputQueue('control2')
    controlQueue3 = device.getInputQueue('control3')
    controlQueue4 = device.getInputQueue('control4')
    configQueue1 = device.getInputQueue('config1')
    configQueue2 = device.getInputQueue('config2')
    configQueue3 = device.getInputQueue('config3')
    configQueue4 = device.getInputQueue('config4')
    ispQueue1 = device.getOutputQueue('isp1')
    ispQueue2 = device.getOutputQueue('isp2')
    ispQueue3 = device.getOutputQueue('isp3')
    ispQueue4 = device.getOutputQueue('isp4')
    videoQueue1 = device.getOutputQueue('video1')
    videoQueue2 = device.getOutputQueue('video2')
    videoQueue3 = device.getOutputQueue('video3')
    videoQueue4 = device.getOutputQueue('video4')
    stillQueue1 = device.getOutputQueue('still1')
    stillQueue2 = device.getOutputQueue('still2')
    stillQueue3 = device.getOutputQueue('still3')
    stillQueue4 = device.getOutputQueue('still4')

    # Max cropX & cropY
    maxCropX1 = (camRgb1.getIspWidth() - camRgb1.getVideoWidth()) / camRgb1.getIspWidth()
    maxCropX2 = (camRgb2.getIspWidth() - camRgb2.getVideoWidth()) / camRgb2.getIspWidth()
    maxCropX3 = (camRgb3.getIspWidth() - camRgb3.getVideoWidth()) / camRgb3.getIspWidth()
    maxCropX4 = (camRgb4.getIspWidth() - camRgb4.getVideoWidth()) / camRgb4.getIspWidth()
    maxCropY1 = (camRgb1.getIspHeight() - camRgb1.getVideoHeight()) / camRgb1.getIspHeight()
    maxCropY2 = (camRgb2.getIspHeight() - camRgb2.getVideoHeight()) / camRgb2.getIspHeight()
    maxCropY3 = (camRgb3.getIspHeight() - camRgb3.getVideoHeight()) / camRgb3.getIspHeight()
    maxCropY4 = (camRgb4.getIspHeight() - camRgb4.getVideoHeight()) / camRgb4.getIspHeight()
    print(maxCropX1, maxCropY1, camRgb1.getIspWidth(), camRgb1.getVideoHeight())
    print(maxCropX2, maxCropY2, camRgb2.getIspWidth(), camRgb2.getVideoHeight())
    print(maxCropX3, maxCropY3, camRgb3.getIspWidth(), camRgb3.getVideoHeight())
    print(maxCropX4, maxCropY4, camRgb4.getIspWidth(), camRgb4.getVideoHeight())

    # Default crop
    cropX = 0
    cropY = 0
    sendCamConfig = True

    # Defaults and limits for manual focus/exposure controls
    lensPos = 150
    expTime = 20000
    sensIso = 800
    wbManual = 4000
    ae_comp = 0
    ae_lock = False
    awb_lock = False
    saturation = 0
    contrast = 0
    brightness = 0
    sharpness = 0
    luma_denoise = 0
    chroma_denoise = 0
    control = 'none'
    show = False

    awb_mode = cycle([item for name, item in vars(dai.CameraControl.AutoWhiteBalanceMode).items() if name.isupper()])
    anti_banding_mode = cycle([item for name, item in vars(dai.CameraControl.AntiBandingMode).items() if name.isupper()])
    effect_mode = cycle([item for name, item in vars(dai.CameraControl.EffectMode).items() if name.isupper()])
    d_pressed = False
    while True:
        vidFrames1 = videoQueue1.tryGetAll()
        for vidFrame in vidFrames1:
            cv2.imshow('video1', vidFrame.getCvFrame())
        vidFrames2 = videoQueue1.tryGetAll()
        for vidFrame in vidFrames2:
            cv2.imshow('video2', vidFrame.getCvFrame())
        vidFrames3 = videoQueue1.tryGetAll()
        for vidFrame in vidFrames3:
            cv2.imshow('video3', vidFrame.getCvFrame())
        vidFrames4 = videoQueue1.tryGetAll()
        for vidFrame in vidFrames4:
            cv2.imshow('video4', vidFrame.getCvFrame())

        ispFrames = ispQueue1.tryGetAll()
        for ispFrame in ispFrames:
            if show:
                txt = f"[{ispFrame.getSequenceNum()}] "
                txt += f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
                txt += f"ISO: {ispFrame.getSensitivity()}, "
                txt += f"Lens position: {ispFrame.getLensPosition()}, "
                txt += f"Color temp: {ispFrame.getColorTemperature()} K"
                print(txt)
            cv2.imshow('isp1', ispFrame.getCvFrame())
            cv2.imshow('isp2', ispFrame.getCvFrame())
            cv2.imshow('isp3', ispFrame.getCvFrame())
            cv2.imshow('isp4', ispFrame.getCvFrame())

        # Send new cfg to camera
        if sendCamConfig:
            cfg = dai.ImageManipConfig()
            cfg.setCropRect(cropX, cropY, 0, 0)
            configQueue1.send(cfg)
            print('Sending new crop - x: ', cropX, ' y: ', cropY)
            sendCamConfig = False
        stillFrames = stillQueue1.tryGetAll()
        for stillFrame in stillFrames:
            # Decode JPEG
            frame = cv2.imdecode(stillFrame.getData(), cv2.IMREAD_UNCHANGED)
            # Display
            cv2.imshow('still', frame)

        # Update screen (1ms polling rate)
        key = cv2.waitKey(1)
        if key == ord('D'):
            d_pressed = True
            print("tab D")
        if d_pressed:
            if key == ord('q'):
                break
            elif key == ord('/'):
                show = not show
                if not show: print("Printing camera settings: OFF")
            elif key == ord('c'):
                ctrl = dai.CameraControl()
                ctrl.setCaptureStill(True)
                controlQueue1.send(ctrl)
            elif key == ord('t'):
                print("Autofocus trigger (and disable continuous)")
                ctrl = dai.CameraControl()
                ctrl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.AUTO)
                ctrl.setAutoFocusTrigger()
                controlQueue1.send(ctrl)
            elif key == ord('f'):
                print("Autofocus enable, continuous")
                ctrl = dai.CameraControl()
                ctrl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.CONTINUOUS_VIDEO)
                controlQueue1.send(ctrl)
            elif key == ord('e'):
                print("Autoexposure enable")
                ctrl = dai.CameraControl()
                ctrl.setAutoExposureEnable()
                controlQueue1.send(ctrl)
            elif key == ord('b'):
                print("Auto white-balance enable")
                ctrl = dai.CameraControl()
                ctrl.setAutoWhiteBalanceMode(dai.CameraControl.AutoWhiteBalanceMode.AUTO)
                controlQueue1.send(ctrl)
            elif key in [ord(','), ord('.')]:
                if key == ord(','): lensPos -= LENS_STEP
                if key == ord('.'): lensPos += LENS_STEP
                lensPos = clamp(lensPos, 0, 255)
                print("Setting manual focus, lens position: ", lensPos)
                ctrl = dai.CameraControl()
                ctrl.setManualFocus(lensPos)
                controlQueue1.send(ctrl)
            elif key in [ord('i'), ord('o'), ord('k'), ord('l')]:
                if key == ord('i'): expTime -= EXP_STEP
                if key == ord('o'): expTime += EXP_STEP
                if key == ord('k'): sensIso -= ISO_STEP
                if key == ord('l'): sensIso += ISO_STEP
                expTime = clamp(expTime, 1, 33000)
                sensIso = clamp(sensIso, 100, 1600)
                print("Setting manual exposure, time: ", expTime, "iso: ", sensIso)
                ctrl = dai.CameraControl()
                ctrl.setManualExposure(expTime, sensIso)
                controlQueue1.send(ctrl)
            elif key in [ord('n'), ord('m')]:
                if key == ord('n'): wbManual -= WB_STEP
                if key == ord('m'): wbManual += WB_STEP
                wbManual = clamp(wbManual, 1000, 12000)
                print("Setting manual white balance, temperature: ", wbManual, "K")
                ctrl = dai.CameraControl()
                ctrl.setManualWhiteBalance(wbManual)
                controlQueue1.send(ctrl)
            elif key in [ord('w'), ord('a'), ord('s'), ord('d')]:
                if key == ord('a'):
                    cropX = cropX - (maxCropX1 / camRgb1.getResolutionWidth()) * STEP_SIZE
                    if cropX < 0: cropX = 0
                elif key == ord('d'):
                    cropX = cropX + (maxCropX1 / camRgb1.getResolutionWidth()) * STEP_SIZE
                    if cropX > maxCropX1: cropX = maxCropX1
                elif key == ord('w'):
                    cropY = cropY - (maxCropY1 / camRgb1.getResolutionHeight()) * STEP_SIZE
                    if cropY < 0: cropY = 0
                elif key == ord('s'):
                    cropY = cropY + (maxCropY1 / camRgb1.getResolutionHeight()) * STEP_SIZE
                    if cropY > maxCropY1: cropY = maxCropY1
                sendCamConfig = True
            elif key == ord('1'):
                awb_lock = not awb_lock
                print("Auto white balance lock:", awb_lock)
                ctrl = dai.CameraControl()
                ctrl.setAutoWhiteBalanceLock(awb_lock)
                controlQueue1.send(ctrl)
            elif key == ord('2'):
                ae_lock = not ae_lock
                print("Auto exposure lock:", ae_lock)
                ctrl = dai.CameraControl()
                ctrl.setAutoExposureLock(ae_lock)
                controlQueue1.send(ctrl)
            elif key >= 0 and chr(key) in '34567890[]':
                if key == ord('3'): control = 'awb_mode'
                elif key == ord('4'): control = 'ae_comp'
                elif key == ord('5'): control = 'anti_banding_mode'
                elif key == ord('6'): control = 'effect_mode'
                elif key == ord('7'): control = 'brightness'
                elif key == ord('8'): control = 'contrast'
                elif key == ord('9'): control = 'saturation'
                elif key == ord('0'): control = 'sharpness'
                elif key == ord('['): control = 'luma_denoise'
                elif key == ord(']'): control = 'chroma_denoise'
                print("Selected control:", control)
            elif key in [ord('-'), ord('_'), ord('+'), ord('=')]:
                change = 0
                if key in [ord('-'), ord('_')]: change = -1
                if key in [ord('+'), ord('=')]: change = 1
                ctrl = dai.CameraControl()
                if control == 'none':
                    print("Please select a control first using keys 3..9 0 [ ]")
                elif control == 'ae_comp':
                    ae_comp = clamp(ae_comp + change, -9, 9)
                    print("Auto exposure compensation:", ae_comp)
                    ctrl.setAutoExposureCompensation(ae_comp)
                elif control == 'anti_banding_mode':
                    abm = next(anti_banding_mode)
                    print("Anti-banding mode:", abm)
                    ctrl.setAntiBandingMode(abm)
                elif control == 'awb_mode':
                    awb = next(awb_mode)
                    print("Auto white balance mode:", awb)
                    ctrl.setAutoWhiteBalanceMode(awb)
                elif control == 'effect_mode':
                    eff = next(effect_mode)
                    print("Effect mode:", eff)
                    ctrl.setEffectMode(eff)
                elif control == 'brightness':
                    brightness = clamp(brightness + change, -10, 10)
                    print("Brightness:", brightness)
                    ctrl.setBrightness(brightness)
                elif control == 'contrast':
                    contrast = clamp(contrast + change, -10, 10)
                    print("Contrast:", contrast)
                    ctrl.setContrast(contrast)
                elif control == 'saturation':
                    saturation = clamp(saturation + change, -10, 10)
                    print("Saturation:", saturation)
                    ctrl.setSaturation(saturation)
                elif control == 'sharpness':
                    sharpness = clamp(sharpness + change, 0, 4)
                    print("Sharpness:", sharpness)
                    ctrl.setSharpness(sharpness)
                elif control == 'luma_denoise':
                    luma_denoise = clamp(luma_denoise + change, 0, 4)
                    print("Luma denoise:", luma_denoise)
                    ctrl.setLumaDenoise(luma_denoise)
                elif control == 'chroma_denoise':
                    chroma_denoise = clamp(chroma_denoise + change, 0, 4)
                    print("Chroma denoise:", chroma_denoise)
                    ctrl.setChromaDenoise(chroma_denoise)
                controlQueue1.send(ctrl)
Hi @Elusive
I ran some tests. It looks like the device runs out of memory due to the preallocated buffer sizes of the XLinkIn nodes.
You can solve that by setting a maximum data size on each input node:
controlIn1 = pipeline.create(dai.node.XLinkIn)
controlIn2 = pipeline.create(dai.node.XLinkIn)
controlIn3 = pipeline.create(dai.node.XLinkIn)
controlIn4 = pipeline.create(dai.node.XLinkIn)
controlIn1.setMaxDataSize(16)
controlIn2.setMaxDataSize(16)
controlIn3.setMaxDataSize(16)
controlIn4.setMaxDataSize(16)
configIn1 = pipeline.create(dai.node.XLinkIn)
configIn2 = pipeline.create(dai.node.XLinkIn)
configIn3 = pipeline.create(dai.node.XLinkIn)
configIn4 = pipeline.create(dai.node.XLinkIn)
configIn1.setMaxDataSize(16)
configIn2.setMaxDataSize(16)
configIn3.setMaxDataSize(16)
configIn4.setMaxDataSize(16)
With this change, I was able to make it work.
Moreover:
ispFrames = ispQueue1.tryGetAll()
for ispFrame in ispFrames:
    if show:
        txt = f"[{ispFrame.getSequenceNum()}] "
        txt += f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
        txt += f"ISO: {ispFrame.getSensitivity()}, "
        txt += f"Lens position: {ispFrame.getLensPosition()}, "
        txt += f"Color temp: {ispFrame.getColorTemperature()} K"
        print(txt)
    cv2.imshow('isp1', ispFrame.getCvFrame())
    cv2.imshow('isp2', ispFrame.getCvFrame())
    cv2.imshow('isp3', ispFrame.getCvFrame())
    cv2.imshow('isp4', ispFrame.getCvFrame())
The frame-viewing part is wrong in several places: everything is read from ispQueue1, and the same frame is shown in all four windows. Fix this to get all four streams.
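For example, something along these lines (an untested sketch, reusing the queue and window names from your code):
# Poll each ISP queue separately and show its frames in its own window
ispQueues = {'isp1': ispQueue1, 'isp2': ispQueue2, 'isp3': ispQueue3, 'isp4': ispQueue4}
for name, queue in ispQueues.items():
    for ispFrame in queue.tryGetAll():  # tryGetAll() returns a (possibly empty) list
        if show:
            print(f"[{name} {ispFrame.getSequenceNum()}] "
                  f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
                  f"ISO: {ispFrame.getSensitivity()}")
        cv2.imshow(name, ispFrame.getCvFrame())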
Thanks,
Jaka
Thank you for your reply. I see that my frame-display code was wrong, but I still don't understand why I can't get the frames for the video streams.
It still doesn't seem to be working; can you take a look?
while True:
    vidFrames1 = videoQueue1.tryGet()
    if vidFrames1 is not None:
        cv2.imshow('video1', vidFrames1.getCvFrame())
    vidFrames2 = videoQueue2.tryGet()
    if vidFrames2 is not None:
        cv2.imshow('video2', vidFrames2.getCvFrame())
    vidFrames3 = videoQueue3.tryGet()
    if vidFrames3 is not None:
        cv2.imshow('video3', vidFrames3.getCvFrame())
    vidFrames4 = videoQueue4.tryGet()
    if vidFrames4 is not None:
        cv2.imshow('video4', vidFrames4.getCvFrame())

    ispFrames1 = ispQueue1.tryGet()
    for ispFrame1 in ispFrames1:
        if show:
            txt = f"[{ispFrame1.getSequenceNum()}] "
            txt += f"Exposure: {ispFrame1.getExposureTime().total_seconds() * 1000:.3f} ms, "
            txt += f"ISO: {ispFrame1.getSensitivity()}, "
            txt += f"Lens position: {ispFrame1.getLensPosition()}, "
            txt += f"Color temp: {ispFrame1.getColorTemperature()} K"
            print(txt)
        cv2.imshow('isp1', ispFrame1.getCvFrame())
    ispFrames2 = ispQueue3.tryGet()
    for ispFrame2 in ispFrames2:
        if show:
            txt = f"[{ispFrame2.getSequenceNum()}] "
            txt += f"Exposure: {ispFrame2.getExposureTime().total_seconds() * 1000:.3f} ms, "
            txt += f"ISO: {ispFrame2.getSensitivity()}, "
            txt += f"Lens position: {ispFrame2.getLensPosition()}, "
            txt += f"Color temp: {ispFrame2.getColorTemperature()} K"
            print(txt)
        cv2.imshow('isp3', ispFrame2.getCvFrame())
    ispFrames3 = ispQueue3.tryGet()
    for ispFrame3 in ispFrames3:
        if show:
            txt = f"[{ispFrame3.getSequenceNum()}] "
            txt += f"Exposure: {ispFrame3.getExposureTime().total_seconds() * 1000:.3f} ms, "
            txt += f"ISO: {ispFrame3.getSensitivity()}, "
            txt += f"Lens position: {ispFrame3.getLensPosition()}, "
            txt += f"Color temp: {ispFrame3.getColorTemperature()} K"
            print(txt)
        cv2.imshow('isp3', ispFrame3.getCvFrame())
    ispFrames4 = ispQueue4.tryGet()
    for ispFrame4 in ispFrames4:
        if show:
            txt = f"[{ispFrame4.getSequenceNum()}] "
            txt += f"Exposure: {ispFrame4.getExposureTime().total_seconds() * 1000:.3f} ms, "
            txt += f"ISO: {ispFrame4.getSensitivity()}, "
            txt += f"Lens position: {ispFrame4.getLensPosition()}, "
            txt += f"Color temp: {ispFrame4.getColorTemperature()} K"
            print(txt)
        cv2.imshow('isp4', ispFrame4.getCvFrame())