Hi luxd
Sorry, I meant to say: run only the camera, without NN inference or other processing. Could you try this script?
#!/usr/bin/env python3
"""Stream 720p color frames from a DepthAI (OAK) camera at 90 FPS and print
the measured frame rate — no NN inference, just the camera pipeline.

Press 'q' in the preview window to quit.
"""
import cv2
import depthai as dai
import datetime

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName('rgb')
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_720_P)
camRgb.setFps(90)
camRgb.video.link(xoutRgb.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queue will be used to get the RGB frames from the output defined above.
    # blocking=False means old frames are dropped if we can't keep up.
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    prev_ts = None  # timestamp of the previous frame; None until the first frame arrives

    while True:
        # Instead of get (blocking), we use tryGet (non-blocking) which will
        # return the available data or None otherwise
        inRgb = qRgb.tryGet()
        if inRgb is not None:
            ts = inRgb.getTimestamp()
            # Skip the first frame (no previous timestamp) and guard against a
            # zero delta, which would otherwise divide by zero.
            if prev_ts is not None:
                delta = (ts - prev_ts).total_seconds()
                if delta > 0:
                    print("rgb camera fps: {:.1f}".format(1.0 / delta))
            prev_ts = ts
            cv2.imshow("rgb", inRgb.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break

cv2.destroyAllWindows()
Thanks,
Jaka