import time
import depthai as dai
import cv2
from itertools import cycle
import numpy as np

# Step size for the 'W'/'A'/'S'/'D' crop controls (sensor pixels per key press)
STEP_SIZE = 8
# Manual exposure/focus/white-balance adjustment steps
EXP_STEP = 500  # exposure time step, microseconds
ISO_STEP = 50
LENS_STEP = 3
WB_STEP = 200  # white-balance step, Kelvin

# ===> ToF color map: JET colormap, with value 0 (invalid/zero depth) forced to black
cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET)
cvColorMap[0] = [0, 0, 0]
# <===
def clamp(num, v0, v1):
    """Clamp num to the inclusive range [v0, v1]."""
    return max(v0, min(num, v1))
# --- RGB camera pipeline ----------------------------------------------------
# Build the DepthAI pipeline shared by the RGB and ToF paths.
pipeline = dai.Pipeline()
# Sources and outputs for the color camera path
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_C)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP)
camRgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)
camRgb.setIspScale(1,2) # halve ISP output — NOTE(review): original comment said "1080P -> 720P" but the sensor is configured as 12 MP above; confirm intended output size
stillEncoder = pipeline.create(dai.node.VideoEncoder)
# Host <-> device XLink endpoints
controlIn = pipeline.create(dai.node.XLinkIn)   # camera control messages (focus/exposure/...)
configIn = pipeline.create(dai.node.XLinkIn)    # crop configuration messages
ispOut = pipeline.create(dai.node.XLinkOut)     # full ISP frames
videoOut = pipeline.create(dai.node.XLinkOut)   # cropped video frames
stillMjpegOut = pipeline.create(dai.node.XLinkOut)  # MJPEG-encoded stills
# Stream names must match the queue names used after device start
controlIn.setStreamName('control')
configIn.setStreamName('config')
ispOut.setStreamName('isp')
videoOut.setStreamName('video')
stillMjpegOut.setStreamName('still')
# Properties
# Small cropped video window
camRgb.setVideoSize(640,360)
stillEncoder.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG)
# Linking
camRgb.isp.link(ispOut.input)
camRgb.still.link(stillEncoder.input)
camRgb.video.link(videoOut.input)
controlIn.out.link(camRgb.inputControl)
configIn.out.link(camRgb.inputConfig)
stillEncoder.bitstream.link(stillMjpegOut.input)
#=========> ToF Pipeline Begin
tof = pipeline.create(dai.node.ToF)
#Configure the ToF node
tofConfig = tof.initialConfig.get()
#Optional. Best accuracy, but adds motion blur.
#see ToF node docs on how to reduce/eliminate motion blur.
tofConfig.enableOpticalCorrection = True
tofConfig.enablePhaseShuffleTemporalFilter = True
tofConfig.phaseUnwrappingLevel = 1
tofConfig.phaseUnwrapErrorThreshold = 25
tofConfig.enableTemperatureCorrection = False # Not yet supported
xinTofConfig = pipeline.create(dai.node.XLinkIn)
xinTofConfig.setStreamName("tofConfig")
xinTofConfig.out.link(tof.inputConfig)
tof.initialConfig.set(tofConfig)
cam_tof = pipeline.create(dai.node.Camera)
cam_tof.setFps(120) # ToF node will produce depth frames at /2 of this rate
cam_tof.setBoardSocket(dai.CameraBoardSocket.CAM_A)
cam_tof.raw.link(tof.input)
cam_tof.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("depth")
tof.depth.link(xout.input)
tofConfig = tof.initialConfig.get()
#<================== End ToF Pipeline
# Connect to the device and start the pipeline. A single event loop services
# both the ToF depth stream and the RGB preview/control streams.
with dai.Device(pipeline) as device:

    # Host-side queues (names must match the XLink stream names set above)
    controlQueue = device.getInputQueue('control')
    configQueue = device.getInputQueue('config')
    ispQueue = device.getOutputQueue('isp')
    videoQueue = device.getOutputQueue('video')
    stillQueue = device.getOutputQueue('still')

    # Max crop offsets for the video window, as a fraction of the ISP frame
    maxCropX = (camRgb.getIspWidth() - camRgb.getVideoWidth()) / camRgb.getIspWidth()
    maxCropY = (camRgb.getIspHeight() - camRgb.getVideoHeight()) / camRgb.getIspHeight()
    print(maxCropX, maxCropY, camRgb.getIspWidth(), camRgb.getVideoHeight())

    # Default crop (top-left corner, normalized [0..maxCrop] coordinates)
    cropX = 0
    cropY = 0
    sendCamConfig = True

    # Defaults and limits for manual focus/exposure/white-balance controls
    lensPos = 150
    expTime = 20000
    sensIso = 800
    wbManual = 4000
    ae_comp = 0
    ae_lock = False
    awb_lock = False
    saturation = 0
    contrast = 0
    brightness = 0
    sharpness = 0
    luma_denoise = 0
    chroma_denoise = 0
    control = 'none'
    show = False

    # Endless cyclers over every enum member (upper-case attribute names only)
    awb_mode = cycle([item for name, item in vars(dai.CameraControl.AutoWhiteBalanceMode).items() if name.isupper()])
    anti_banding_mode = cycle([item for name, item in vars(dai.CameraControl.AntiBandingMode).items() if name.isupper()])
    effect_mode = cycle([item for name, item in vars(dai.CameraControl.EffectMode).items() if name.isupper()])

    # =====> ToF data queues
    qDepth = device.getOutputQueue(name="depth")
    tofConfigInQueue = device.getInputQueue("tofConfig")
    counter = 0
    # <===== ToF data queues END

    while True:
        # ====> ToF camera features
        # NOTE(review): cv2.waitKey is called twice per iteration (here and
        # again below for the RGB controls). A key press is consumed by
        # whichever call happens to see it first, and several keys
        # ('q','f','t','w','0'..'5') are bound in BOTH handlers — kept as-is,
        # confirm this split handling is intended.
        start = time.time()
        key = cv2.waitKey(1)
        if key == ord('f'):
            tofConfig.enableFPPNCorrection = not tofConfig.enableFPPNCorrection
            tofConfigInQueue.send(tofConfig)
        elif key == ord('o'):
            tofConfig.enableOpticalCorrection = not tofConfig.enableOpticalCorrection
            tofConfigInQueue.send(tofConfig)
        elif key == ord('w'):
            tofConfig.enableWiggleCorrection = not tofConfig.enableWiggleCorrection
            tofConfigInQueue.send(tofConfig)
        elif key == ord('t'):
            tofConfig.enableTemperatureCorrection = not tofConfig.enableTemperatureCorrection
            tofConfigInQueue.send(tofConfig)
        elif key == ord('q'):
            break
        elif key == ord('0'):
            # Level 0: disable phase unwrapping entirely
            tofConfig.enablePhaseUnwrapping = False
            tofConfig.phaseUnwrappingLevel = 0
            tofConfigInQueue.send(tofConfig)
        elif key in (ord('1'), ord('2'), ord('3'), ord('4'), ord('5')):
            # Keys '1'..'5' select phase unwrapping level 1..5
            tofConfig.enablePhaseUnwrapping = True
            tofConfig.phaseUnwrappingLevel = key - ord('0')
            tofConfigInQueue.send(tofConfig)
        elif key == ord('z'):
            # Cycle to the next median-filter kernel size
            medianSettings = [dai.MedianFilter.MEDIAN_OFF, dai.MedianFilter.KERNEL_3x3,
                              dai.MedianFilter.KERNEL_5x5, dai.MedianFilter.KERNEL_7x7]
            currentMedian = tofConfig.median
            nextMedian = medianSettings[(medianSettings.index(currentMedian) + 1) % len(medianSettings)]
            print(f"Changing median to {nextMedian.name} from {currentMedian.name}")
            tofConfig.median = nextMedian
            tofConfigInQueue.send(tofConfig)

        # Fetch and display the latest depth frame (blocks until one arrives)
        imgFrame = qDepth.get()
        depth_map = imgFrame.getFrame()
        max_depth = (tofConfig.phaseUnwrappingLevel + 1) * 1500  # 100MHz modulation freq.
        depth_colorized = np.interp(depth_map, (0, max_depth), (0, 255)).astype(np.uint8)
        depth_colorized = cv2.applyColorMap(depth_colorized, cvColorMap)
        cv2.imshow("Colorized depth", depth_colorized)
        counter += 1
        # <==== ToF camera features and show END

        # Cropped RGB video window
        vidFrames = videoQueue.tryGetAll()
        for vidFrame in vidFrames:
            cv2.imshow('video', vidFrame.getCvFrame())

        ispFrames = ispQueue.tryGetAll()
        for ispFrame in ispFrames:
            if show:
                # Print the per-frame 3A state when '/' toggled it on
                txt = f"[{ispFrame.getSequenceNum()}] "
                txt += f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
                txt += f"ISO: {ispFrame.getSensitivity()}, "
                txt += f"Lens position: {ispFrame.getLensPosition()}, "
                txt += f"Color temp: {ispFrame.getColorTemperature()} K"
                print(txt)
            # Original (full ISP) color image
            cv2.imshow('isp', ispFrame.getCvFrame())

            # Send a new crop config to the camera once per change
            if sendCamConfig:
                cfg = dai.ImageManipConfig()
                cfg.setCropRect(cropX, cropY, 0, 0)
                configQueue.send(cfg)
                print('Sending new crop - x: ', cropX, ' y: ', cropY)
                sendCamConfig = False

        stillFrames = stillQueue.tryGetAll()
        for stillFrame in stillFrames:
            # Decode the MJPEG bitstream and display the captured still
            frame = cv2.imdecode(stillFrame.getData(), cv2.IMREAD_UNCHANGED)
            cv2.imshow('still', frame)

        # ====> RGB camera controls (second key poll — see NOTE above)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('/'):
            show = not show
            if not show: print("Printing camera settings: OFF")
        elif key == ord('c'):
            # Trigger a still capture
            ctrl = dai.CameraControl()
            ctrl.setCaptureStill(True)
            controlQueue.send(ctrl)
        elif key == ord('t'):
            print("Autofocus trigger (and disable continuous)")
            ctrl = dai.CameraControl()
            ctrl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.AUTO)
            ctrl.setAutoFocusTrigger()
            controlQueue.send(ctrl)
        elif key == ord('f'):
            print("Autofocus enable, continuous")
            ctrl = dai.CameraControl()
            ctrl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.CONTINUOUS_VIDEO)
            controlQueue.send(ctrl)
        elif key == ord('e'):
            print("Autoexposure enable")
            ctrl = dai.CameraControl()
            ctrl.setAutoExposureEnable()
            controlQueue.send(ctrl)
        elif key == ord('b'):
            print("Auto white-balance enable")
            ctrl = dai.CameraControl()
            ctrl.setAutoWhiteBalanceMode(dai.CameraControl.AutoWhiteBalanceMode.AUTO)
            controlQueue.send(ctrl)
        elif key in [ord(','), ord('.')]:
            # Manual focus: ',' nearer / '.' farther
            if key == ord(','): lensPos -= LENS_STEP
            if key == ord('.'): lensPos += LENS_STEP
            lensPos = clamp(lensPos, 0, 255)
            print("Setting manual focus, lens position: ", lensPos)
            ctrl = dai.CameraControl()
            ctrl.setManualFocus(lensPos)
            controlQueue.send(ctrl)
        elif key in [ord('i'), ord('o'), ord('k'), ord('l')]:
            # Manual exposure: i/o adjust time, k/l adjust ISO
            if key == ord('i'): expTime -= EXP_STEP
            if key == ord('o'): expTime += EXP_STEP
            if key == ord('k'): sensIso -= ISO_STEP
            if key == ord('l'): sensIso += ISO_STEP
            expTime = clamp(expTime, 1, 33000)
            sensIso = clamp(sensIso, 100, 1600)
            print("Setting manual exposure, time: ", expTime, "iso: ", sensIso)
            ctrl = dai.CameraControl()
            ctrl.setManualExposure(expTime, sensIso)
            controlQueue.send(ctrl)
        elif key in [ord('n'), ord('m')]:
            # Manual white balance: n cooler / m warmer
            if key == ord('n'): wbManual -= WB_STEP
            if key == ord('m'): wbManual += WB_STEP
            wbManual = clamp(wbManual, 1000, 12000)
            print("Setting manual white balance, temperature: ", wbManual, "K")
            ctrl = dai.CameraControl()
            ctrl.setManualWhiteBalance(wbManual)
            controlQueue.send(ctrl)
        elif key in [ord('w'), ord('a'), ord('s'), ord('d')]:
            # Pan the crop window; offsets are clamped to [0, maxCrop]
            if key == ord('a'):
                cropX = cropX - (maxCropX / camRgb.getResolutionWidth()) * STEP_SIZE
                if cropX < 0: cropX = 0
            elif key == ord('d'):
                cropX = cropX + (maxCropX / camRgb.getResolutionWidth()) * STEP_SIZE
                if cropX > maxCropX: cropX = maxCropX
            elif key == ord('w'):
                cropY = cropY - (maxCropY / camRgb.getResolutionHeight()) * STEP_SIZE
                if cropY < 0: cropY = 0
            elif key == ord('s'):
                cropY = cropY + (maxCropY / camRgb.getResolutionHeight()) * STEP_SIZE
                if cropY > maxCropY: cropY = maxCropY
            sendCamConfig = True
        elif key == ord('1'):
            awb_lock = not awb_lock
            print("Auto white balance lock:", awb_lock)
            ctrl = dai.CameraControl()
            ctrl.setAutoWhiteBalanceLock(awb_lock)
            controlQueue.send(ctrl)
        elif key == ord('2'):
            ae_lock = not ae_lock
            print("Auto exposure lock:", ae_lock)
            ctrl = dai.CameraControl()
            ctrl.setAutoExposureLock(ae_lock)
            controlQueue.send(ctrl)
        elif key >= 0 and chr(key) in '34567890[]':
            # Select which control the +/- keys will adjust
            if key == ord('3'): control = 'awb_mode'
            elif key == ord('4'): control = 'ae_comp'
            elif key == ord('5'): control = 'anti_banding_mode'
            elif key == ord('6'): control = 'effect_mode'
            elif key == ord('7'): control = 'brightness'
            elif key == ord('8'): control = 'contrast'
            elif key == ord('9'): control = 'saturation'
            elif key == ord('0'): control = 'sharpness'
            elif key == ord('['): control = 'luma_denoise'
            elif key == ord(']'): control = 'chroma_denoise'
            print("Selected control:", control)
        elif key in [ord('-'), ord('_'), ord('+'), ord('=')]:
            # Adjust the currently selected control by +/-1 (or cycle modes)
            change = 0
            if key in [ord('-'), ord('_')]: change = -1
            if key in [ord('+'), ord('=')]: change = 1
            ctrl = dai.CameraControl()
            if control == 'none':
                print("Please select a control first using keys 3..9 0 [ ]")
            elif control == 'ae_comp':
                ae_comp = clamp(ae_comp + change, -9, 9)
                print("Auto exposure compensation:", ae_comp)
                ctrl.setAutoExposureCompensation(ae_comp)
            elif control == 'anti_banding_mode':
                abm = next(anti_banding_mode)
                print("Anti-banding mode:", abm)
                ctrl.setAntiBandingMode(abm)
            elif control == 'awb_mode':
                awb = next(awb_mode)
                print("Auto white balance mode:", awb)
                ctrl.setAutoWhiteBalanceMode(awb)
            elif control == 'effect_mode':
                eff = next(effect_mode)
                print("Effect mode:", eff)
                ctrl.setEffectMode(eff)
            elif control == 'brightness':
                brightness = clamp(brightness + change, -10, 10)
                print("Brightness:", brightness)
                ctrl.setBrightness(brightness)
            elif control == 'contrast':
                contrast = clamp(contrast + change, -10, 10)
                print("Contrast:", contrast)
                ctrl.setContrast(contrast)
            elif control == 'saturation':
                saturation = clamp(saturation + change, -10, 10)
                print("Saturation:", saturation)
                ctrl.setSaturation(saturation)
            elif control == 'sharpness':
                sharpness = clamp(sharpness + change, 0, 4)
                print("Sharpness:", sharpness)
                ctrl.setSharpness(sharpness)
            elif control == 'luma_denoise':
                luma_denoise = clamp(luma_denoise + change, 0, 4)
                print("Luma denoise:", luma_denoise)
                ctrl.setLumaDenoise(luma_denoise)
            elif control == 'chroma_denoise':
                chroma_denoise = clamp(chroma_denoise + change, 0, 4)
                print("Chroma denoise:", chroma_denoise)
                ctrl.setChromaDenoise(chroma_denoise)
            # NOTE: an (empty) control message is sent even when no control
            # was selected — behavior kept from the original
            controlQueue.send(ctrl)

    # ===> ToF additional device close — redundant, the `with` block also
    # closes the device on exit; kept for parity with the original
    device.close()
    # <=== ToF additional device close