# erik: Apologies, I wasn't familiar with the term — here it is:
import numpy as np
import cv2
import depthai as dai
pipeline = dai.Pipeline()

# --- Nodes -----------------------------------------------------------------
# Color camera: 1080P sensor, ISP-downscaled 2/3 to 720P, video output cropped
# to 1080x720.
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setIspScale(2, 3)  # 1080P -> 720P
camRgb.setVideoSize(1080, 720)

# MJPEG encoder for still captures (1 fps preset)
stillEncoder = pipeline.create(dai.node.VideoEncoder)
stillEncoder.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)

# Host <-> device streams, each created and named together
controlIn = pipeline.create(dai.node.XLinkIn)
controlIn.setStreamName('control')
configIn = pipeline.create(dai.node.XLinkIn)
configIn.setStreamName('config')
ispOut = pipeline.create(dai.node.XLinkOut)
ispOut.setStreamName('isp')
videoOut = pipeline.create(dai.node.XLinkOut)
videoOut.setStreamName('video')
stillMjpegOut = pipeline.create(dai.node.XLinkOut)
stillMjpegOut.setStreamName('still')

# --- Linking ---------------------------------------------------------------
camRgb.isp.link(ispOut.input)
camRgb.video.link(videoOut.input)
camRgb.still.link(stillEncoder.input)
stillEncoder.bitstream.link(stillMjpegOut.input)
controlIn.out.link(camRgb.inputControl)
configIn.out.link(camRgb.inputConfig)
def _normalize_brightness(frame, setpoint):
    """Scale the V channel of a BGR frame so its mean brightness hits *setpoint*.

    frame:    BGR uint8 image.
    setpoint: target mean V value, normalized to [0, 1].
    Returns a new BGR uint8 image.  A completely black frame is returned
    unchanged to avoid dividing by a zero mean.
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Mean of the V (brightness) channel, normalized to [0, 1]
    live_value = np.mean(hsv[..., 2]) / 255
    if live_value == 0:
        return frame
    # cv2.multiply saturates uint8 at 255, so no explicit clipping is needed
    hsv[..., 2] = cv2.multiply(hsv[..., 2], setpoint / live_value)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)


# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Input queues (available for runtime camera control/config; unused below)
    controlQueue = device.getInputQueue('control')
    configQueue = device.getInputQueue('config')
    # Output queues — fetched once, not per loop iteration
    ispQueue = device.getOutputQueue('isp')
    videoQueue = device.getOutputQueue('video')
    stillQueue = device.getOutputQueue('still')

    # Max cropX & cropY: normalized margins between the full ISP frame and the
    # cropped video output
    maxCropX = (camRgb.getIspWidth() - camRgb.getVideoWidth()) / camRgb.getIspWidth()
    maxCropY = (camRgb.getIspHeight() - camRgb.getVideoHeight()) / camRgb.getIspHeight()
    print(maxCropX, maxCropY, camRgb.getIspWidth(), camRgb.getVideoHeight())

    # Target mean brightness for the displayed video (hoisted out of the loop)
    predefined_setpoint = 0.82

    running = True
    while running:
        # Drain every frame queued since the last pass (non-blocking)
        for vidFrame in videoQueue.tryGetAll():
            frame = vidFrame.getCvFrame()
            cv2.imshow('isp', _normalize_brightness(frame, predefined_setpoint))
        # Poll the keyboard every pass so the window stays responsive even
        # when no frames arrive, and so 'q' exits the OUTER loop (a bare
        # `break` inside the for-loop would only skip the remaining frames).
        if cv2.waitKey(1) & 0xFF == ord('q'):
            running = False
    cv2.destroyAllWindows()