jakaskerl
Here's an MRE of just saving images directly:
import cv2
import numpy as np
import depthai as dai
import time

# TODO: Figure out how to ensure the fps for the OAK camera has been changed to 60
def save_frames(output_folder):
    pipeline = dai.Pipeline()

    oakCam = pipeline.create(dai.node.ColorCamera)
    print(str(oakCam.getFps()))  # default FPS before the override below
    oakCam.setFps(60)

    manip = pipeline.create(dai.node.ImageManip)
    controlIn = pipeline.create(dai.node.XLinkIn)
    manipOut = pipeline.create(dai.node.XLinkOut)

    controlIn.setStreamName('control')
    manipOut.setStreamName("preview")

    topLeft = dai.Point2f(0.2, 0.2)      # intended crop region, currently unused
    bottomRight = dai.Point2f(0.8, 0.8)  # intended crop region, currently unused

    # to save the video using opencv
    # cap = cv2.VideoCapture(0)

    oakCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    manip.setMaxOutputFrameSize(12441600)

    # Linking
    oakCam.video.link(manip.inputImage)
    manip.out.link(manipOut.input)
    controlIn.out.link(oakCam.inputControl)

    with dai.Device(pipeline) as device:
        controlQueue = device.getInputQueue(controlIn.getStreamName(), maxSize=30, blocking=False)
        qPreview = device.getOutputQueue("preview", maxSize=30, blocking=False)

        temp = dict()
        fc = 0
        while True:
            img = qPreview.get()
            fc += 1
            frame_name = f"frame_{time.time()}.jpg"
            output_path = f"{output_folder}/{frame_name}"
            temp[frame_name] = img
            # Note: without a cv2.imshow window in focus, waitKey never receives
            # the 'q' key, so this loop only ends by killing the process
            if cv2.waitKey(1) == ord('q'):
                break

        print(fc)
        for filename, f in temp.items():
            frame = f.getCvFrame()
            output_path = f"{output_folder}/{filename}"
            cv2.imwrite(output_path, frame)

        cv2.destroyAllWindows()


# Example usage
output_folder = "./testPhotos"  # change as needed
save_frames(output_folder)
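For the TODO about confirming the camera really runs at 60 FPS, one option is to measure the frame arrival rate on the host instead of trusting the setter. A minimal sketch (assuming the qPreview queue from the MRE above; measure_fps is just an illustrative helper, not part of the DepthAI API):

import time

def measure_fps(qPreview, seconds=5):
    # Count how many frames arrive over a fixed wall-clock window
    count = 0
    start = time.monotonic()
    while time.monotonic() - start < seconds:
        qPreview.get()  # blocking call, returns an ImgFrame
        count += 1
    return count / (time.monotonic() - start)

Calling this right after opening the queues should report roughly 60 if the setFps(60) call took effect (host-side overhead can make it read slightly lower).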
Here's another MRE: a script that saves video from the OAK camera as a raw .h265 stream, converts it to .mp4 with ffmpeg, and also tries to save the individual frames as JPEGs:
#!/usr/bin/env python3
# Script to save videos in .h265 format from an OAK camera
import depthai as dai
import time
import os
import cv2

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and output
camRgb = pipeline.create(dai.node.ColorCamera)
ctrl = dai.CameraControl()  # currently unused
videoEnc = pipeline.create(dai.node.VideoEncoder)
xout = pipeline.create(dai.node.XLinkOut)
stillEnc = pipeline.create(dai.node.VideoEncoder)  # created but never configured or linked

xout.setStreamName('h265')

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
fps = 30
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
videoEnc.setDefaultProfilePreset(fps, dai.VideoEncoderProperties.Profile.H265_MAIN)
camRgb.setSensorCrop(0.2, 0.1)  # By default 0.25, 0.25 for center crop

# Linking
camRgb.video.link(videoEnc.input)
videoEnc.bitstream.link(xout.input)

with dai.Device(pipeline) as device:
    # Output queue will be used to get the encoded data from the output defined above
    q = device.getOutputQueue(name="h265", maxSize=30, blocking=True)

    # The .h265 file is a raw stream file (not playable yet)
    curr = int(round(time.time()))
    rawFile = rf'.\rawStreams\{curr}.h265'
    convertedFile = rf'.\convertedStreams\{curr}.mp4'

    # Make sure the output folders exist before writing into them
    os.makedirs(r'.\rawStreams', exist_ok=True)
    os.makedirs(r'.\convertedStreams', exist_ok=True)

    temp = dict()
    fc = 0
    with open(rawFile, 'wb') as videoFile:
        print("Press Ctrl+C to stop encoding...")
        try:
            while True:
                h265Packet = q.get()  # Blocking call, will wait until new data has arrived
                h265Packet.getData().tofile(videoFile)  # Appends the packet data to the opened file
                fc += 1
                frame_name = f"frame_{int(time.time()*1000)}.jpeg"
                temp[frame_name] = h265Packet
        except KeyboardInterrupt:
            # Keyboard interrupt (Ctrl + C) detected
            pass

    print(fc)
    output_folder = rf".\testPhotos"
    os.makedirs(output_folder, exist_ok=True)
    for filename, f in temp.items():
        # Note: the packet payload is H.265 bitstream data, not JPEG, so the
        # .jpeg files written here are not valid images
        output_path = os.path.join(output_folder, filename)
        with open(output_path, "wb") as fi:
            fi.write(f.getData())
        print('Image saved to', output_path)

    print("To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:")
    # print("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4")
    command = f"ffmpeg -framerate {fps} -i {rawFile} -c copy {convertedFile}"
    os.system(command)
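Since the packets coming out of the H.265 encoder are raw bitstream chunks rather than images, writing them to .jpeg files will not produce viewable pictures. A minimal sketch of an alternative, assuming a VideoEncoder configured with the MJPEG profile so that every packet is already a complete JPEG (the "mjpeg" stream name and the ./testPhotos folder are just placeholders):

#!/usr/bin/env python3
# Sketch: encode frames to JPEG on-device with a VideoEncoder in MJPEG mode
import os
import time
import depthai as dai

pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)

jpegEnc = pipeline.create(dai.node.VideoEncoder)
jpegEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.MJPEG)

xoutJpeg = pipeline.create(dai.node.XLinkOut)
xoutJpeg.setStreamName("mjpeg")

camRgb.video.link(jpegEnc.input)
jpegEnc.bitstream.link(xoutJpeg.input)

os.makedirs("./testPhotos", exist_ok=True)

with dai.Device(pipeline) as device:
    qJpeg = device.getOutputQueue(name="mjpeg", maxSize=30, blocking=True)
    try:
        while True:
            pkt = qJpeg.get()  # each packet is a complete JPEG image
            fname = f"./testPhotos/frame_{int(time.time() * 1000)}.jpeg"
            with open(fname, "wb") as f:
                f.write(pkt.getData())
    except KeyboardInterrupt:
        pass

This keeps the JPEG encoding on the device, so the host only writes files, which may also help with keeping up at higher frame rates.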