Hi Gloria,
You would basically have to use the interoperability feature to merge the SDK and the API.
camSocket = dai.CameraBoardSocket.RGB


def getMesh(calibData, ispSize):
    """Build a subsampled undistortion warp mesh to load onto the device.

    Args:
        calibData: device calibration handle; queried via
            ``getCameraIntrinsics`` / ``getDistortionCoefficients`` for the
            module-level ``camSocket``.
        ispSize: (width, height) of the ISP output the mesh must cover.

    Returns:
        Tuple ``(mesh, meshWidth, meshHeight)`` where ``mesh`` is a flat list
        of (x, y) remap coordinates sampled every ``meshCellSize`` pixels.
    """
    M1 = np.array(calibData.getCameraIntrinsics(camSocket, ispSize[0], ispSize[1]))
    d1 = np.array(calibData.getDistortionCoefficients(camSocket))
    R1 = np.identity(3)  # no rectification rotation for a single camera
    mapX, mapY = cv2.initUndistortRectifyMap(M1, d1, R1, M1, ispSize, cv2.CV_32FC1)

    meshCellSize = 16
    mesh0 = []
    # Creates subsampled mesh which will be loaded on to device to undistort the image
    for y in range(mapX.shape[0] + 1):  # iterating over height of the image
        if y % meshCellSize == 0:
            rowLeft = []
            for x in range(mapX.shape[1]):  # iterating over width of the image
                if x % meshCellSize == 0:
                    if y == mapX.shape[0] and x == mapX.shape[1]:
                        # NOTE(review): the x loop is not "+ 1" like the y loop,
                        # so x never equals mapX.shape[1]; this branch and the
                        # "x == mapX.shape[1]" branch below are unreachable as
                        # written. Kept verbatim to preserve the mesh layout.
                        rowLeft.append(mapX[y - 1, x - 1])
                        rowLeft.append(mapY[y - 1, x - 1])
                    elif y == mapX.shape[0]:
                        # Bottom edge: clamp y to the last valid row.
                        rowLeft.append(mapX[y - 1, x])
                        rowLeft.append(mapY[y - 1, x])
                    elif x == mapX.shape[1]:
                        rowLeft.append(mapX[y, x - 1])
                        rowLeft.append(mapY[y, x - 1])
                    else:
                        rowLeft.append(mapX[y, x])
                        rowLeft.append(mapY[y, x])
            if (mapX.shape[1] % meshCellSize) % 2 != 0:
                # Pad so each row holds an even count of values ((x, y) pairs).
                rowLeft.append(0)
                rowLeft.append(0)
            mesh0.append(rowLeft)

    mesh0 = np.array(mesh0)
    meshWidth = mesh0.shape[1] // 2  # two values (x and y) per mesh point
    meshHeight = mesh0.shape[0]
    mesh0.resize(meshWidth * meshHeight, 2)
    mesh = list(map(tuple, mesh0))
    return mesh, meshWidth, meshHeight
# SDK/API interoperability: build the SDK pipeline, then extend it with raw
# depthai API nodes (ImageManip warp + XLinkOut streams).
with OakCamera() as oak:
    color = oak.create_camera('color', resolution='1080P', fps=10, encode='H265')
    left = oak.create_camera('left', resolution='800p', fps=10, encode='H265')
    right = oak.create_camera('right', resolution='800p', fps=10, encode='H265')
    stereo = oak.create_stereo(left=left, right=right)
    nn = oak.create_nn('mobilenet-ssd', color, spatial=stereo)

    calibData = oak.device.readCalibration()
    pipeline = oak.build()

    # Undistort the color ISP output on-device with a warp mesh.
    manip = pipeline.create(dai.node.ImageManip)
    mesh, meshWidth, meshHeight = getMesh(calibData, color.node.getIspSize())
    manip.setWarpMesh(mesh, meshWidth, meshHeight)
    # NV12 frame size is width * height * 3/2 bytes.
    manip.setMaxOutputFrameSize(color.node.getIspWidth() * color.node.getIspHeight() * 3 // 2)
    color.node.isp.link(manip.inputImage)

    # Stream the undistorted frames to the host.
    cam_xout = pipeline.create(dai.node.XLinkOut)
    cam_xout.setStreamName("Undistorted")
    manip.out.link(cam_xout.input)

    # Also stream the original (distorted) ISP frames for comparison.
    dist_xout = pipeline.create(dai.node.XLinkOut)
    dist_xout.setStreamName("Distorted")
    color.node.isp.link(dist_xout.input)
The problem you now have is that the recording feature doesn't work as it usually does in the SDK. You will have to record the streams manually, as done here.
I suggest you don't undistort the camera stream first; perhaps you could do that when replaying the recordings instead, since the SDK doesn't currently support this feature.
Thanks,
Jaka