Hi Jaka,
Thanks for the help. Will try the suggestions on the FPS and queue size.
I still can't get direct triggering of the ToF sensor to work. Could you share the code you used for your own test? Alternatively, please check the code below; I tried to make it the most minimal combination of the tof_depth example and the external-trigger example. For me, this code just produces continuous output together with the warning "[warning] Device configured to generate sync, but no camera with sync output capability was started".
Triggering does work if I replace the ToF camera with a MonoCamera.
#include <iostream>
#include <chrono>
#include <opencv2/opencv.hpp>
#include <depthai/depthai.hpp>
#include <windows.h>
#include <vector>
#include <string>
#include "depthai-shared/datatype/RawToFConfig.hpp"
#include "depthai/pipeline/datatype/ToFConfig.hpp"
#include <mutex>
#include <filesystem>
#include <stdexcept>
// Define the shared pointers and other variables
// Globals shared between Init(), getDepth() and main().
std::shared_ptr<dai::DataOutputQueue> qDepth; // host-side queue for the "depth" XLinkOut stream; filled in Init()
dai::Pipeline pipeline; // pipeline definition uploaded to the device in Init()
std::shared_ptr<dai::node::ToF> tof; // ToF decoding node (raw phase -> depth)
dai::RawToFConfig tofConfig; // working copy of the ToF configuration
std::shared_ptr<dai::Device> device; // connected DepthAI device; owns the XLink connection
// Wrap a raw ToF configuration into a ToFConfig message that can be sent
// to the device (e.g. through the "tofConfig" XLinkIn queue).
std::shared_ptr<dai::ToFConfig> createConfig(dai::RawToFConfig configRaw) {
    auto message = std::make_shared<dai::ToFConfig>();
    message->set(std::move(configRaw));
    return message;
}
// Blocks until the next depth frame arrives on the global "depth" queue,
// then returns it as a cv::Mat (deep copy, per getFrame(true)).
cv::Mat getDepth() {
    const auto frame = qDepth->get<dai::ImgFrame>(); // blocking: waits for new data
    return frame->getFrame(true);
}
// Convert a raw depth frame (depth in mm, 0 == no measurement) into an
// 8-bit visualization: [minDepth, minDepth+255] maps to [0, 255], values
// outside that window saturate, and invalid pixels are forced to black.
// On any OpenCV failure the error is logged and a black frame is returned.
cv::Mat convertToByte(const cv::Mat& frameDepth) {
    // Remember which pixels had no valid depth before we transform the data.
    const cv::Mat invalidMask = (frameDepth == 0);
    cv::Mat depthFrameScaled;
    try {
        const double minDepth = 100.0;
        const double maxDepth = minDepth + 255;
        cv::Mat rawDepth;
        frameDepth.convertTo(rawDepth, CV_32F);
        // Despeckle; ksize 5 is valid for CV_32F input in cv::medianBlur.
        cv::medianBlur(rawDepth, rawDepth, 5);
        rawDepth = (rawDepth - minDepth) / (maxDepth - minDepth) * 255;
        rawDepth.convertTo(depthFrameScaled, CV_8UC1); // saturating cast to 0..255
        depthFrameScaled.setTo(0, invalidMask);        // single channel: plain 0 suffices
    }
    catch (const std::exception& e) {
        // Don't swallow the error silently: report it, then fall back to black.
        std::cerr << "convertToByte failed: " << e.what() << std::endl;
        depthFrameScaled = cv::Mat::zeros(frameDepth.size(), CV_8UC1);
    }
    return depthFrameScaled;
}
// Build the pipeline (ToF camera -> ToF decoder -> XLinkOut "depth"),
// configure external triggering, and open the device.
// Returns true on success (currently always true; Device ctor throws on failure).
bool Init() {
    printf("Initializing DepthAI\n");
    tof = pipeline.create<dai::node::ToF>();
    // Start from the node's default configuration, then override selected fields.
    tofConfig = tof->initialConfig.get();
    tofConfig.enableFPPNCorrection = true;
    tofConfig.enableOpticalCorrection = true;
    tofConfig.enableWiggleCorrection = false;
    tofConfig.enablePhaseShuffleTemporalFilter = false;
    tofConfig.enablePhaseUnwrapping = true;
    tofConfig.phaseUnwrappingLevel = 1;
    tofConfig.phaseUnwrapErrorThreshold = 300;
    tofConfig.enableTemperatureCorrection = true;
    printf("Configuring the ToF node\n");
    // Host-side config channel; currently unused by main(), but kept so the
    // configuration can be changed at runtime via createConfig().
    auto xinTofConfig = pipeline.create<dai::node::XLinkIn>();
    xinTofConfig->setStreamName("tofConfig");
    xinTofConfig->out.link(tof->inputConfig);
    tof->initialConfig.set(tofConfig);
    auto camTof = pipeline.create<dai::node::Camera>();
    camTof->setFps(60);
    camTof->setBoardSocket(dai::CameraBoardSocket::CAM_A);
    // NOTE(review): FrameSyncMode::INPUT + setExternalTrigger is the combination
    // that works for MonoCamera; with the ToF sensor it yields the warning
    // "Device configured to generate sync, but no camera with sync output
    // capability was started" — presumably the ToF Camera node does not
    // advertise sync capability. Confirm expected settings with Luxonis.
    camTof->initialControl.setFrameSyncMode(dai::CameraControl::FrameSyncMode::INPUT);
    camTof->initialControl.setExternalTrigger(1, 0); // burst of 1 frame, discard 0
    camTof->raw.link(tof->input);
    auto xoutDepth = pipeline.create<dai::node::XLinkOut>();
    xoutDepth->setStreamName("depth");
    tof->depth.link(xoutDepth->input);
    // Keep only the newest frame on the device side.
    xoutDepth->input.setBlocking(false);
    xoutDepth->input.queueSize = 1;
    device = std::make_shared<dai::Device>(pipeline);
    std::cout << "Connected cameras: " << device->getConnectedCameraFeatures().size() << std::endl;
    // Host queue: depth 1, non-blocking — newest frame wins.
    qDepth = device->getOutputQueue("depth", 1, false);
    return true;
}
// Entry point: initialize the device, then poll for depth frames and display
// them until 'q' is pressed in the OpenCV window.
int main(int argc, char* argv[]) {
    if (!Init())
        return -1;
    printf("Completed init\n");
    while (true) {
        // Non-blocking check so the UI loop keeps running between triggers.
        if (qDepth->has()) {
            cv::Mat depthFrameScaled = convertToByte(getDepth());
            cv::imshow("Depth", depthFrameScaled);
        }
        else {
            // Progress indicator; flush so the dots appear immediately
            // (stdout is line-buffered and "." contains no newline).
            printf(".");
            fflush(stdout);
        }
        // waitKey also services the HighGUI event loop; 'q' quits.
        int key = cv::waitKey(1);
        if (key == 'q') {
            break;
        }
    }
    return 0;
}