Recently I've been trying to use callbacks and multithreading to transfer data from the device asynchronously, hoping to improve throughput.
However, I've run into a bottleneck.
I expected to get the right mono (grayscale) stream at 90 FPS and the depth stream at 60 FPS, but my current implementation falls short of both. Since the code below counts frames over 10 seconds, that would mean roughly 900 right frames and 600 depth frames.
Is there an error in my implementation, or is this simply the current limit of the device?
Thanks for taking a look. Here is my test code:
#include <atomic>
#include <chrono>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>

#include <opencv2/opencv.hpp>

#include "depthai/depthai.hpp"

// One frame plus the name of the stream it came from.
struct callbackType {
    std::string name;
    cv::Mat frame;
};
int main() {
    dai::Pipeline pipeline;

    // Mono cameras: 400p at 90 FPS on both sockets.
    auto right = pipeline.create<dai::node::MonoCamera>();
    auto left = pipeline.create<dai::node::MonoCamera>();
    right->setCamera("right");
    right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    right->setFps(90);
    left->setCamera("left");
    left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    left->setFps(90);

    // Output streams to the host.
    auto xoutRight = pipeline.create<dai::node::XLinkOut>();
    xoutRight->setStreamName("right");
    auto xoutStereoDepth = pipeline.create<dai::node::XLinkOut>();
    xoutStereoDepth->setStreamName("depth");

    // Stereo depth with LR-check, extended disparity and subpixel enabled.
    auto stereoDepth = pipeline.create<dai::node::StereoDepth>();
    stereoDepth->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::HIGH_DENSITY);
    stereoDepth->initialConfig.setMedianFilter(dai::MedianFilter::KERNEL_7x7);
    stereoDepth->setLeftRightCheck(true);
    stereoDepth->setExtendedDisparity(true);
    stereoDepth->setSubpixel(true);

    left->out.link(stereoDepth->left);
    right->out.link(stereoDepth->right);
    stereoDepth->disparity.link(xoutStereoDepth->input);
    right->out.link(xoutRight->input);

    // Host-side queues shared between the reader threads and the main loop.
    std::queue<callbackType> depthqueue;
    std::mutex depthqueueMtx;
    std::queue<callbackType> rightqueue;
    std::mutex rightqueueMtx;
    std::atomic<bool> running{true};

    dai::Device device(pipeline);
    // Reader threads: pull frames off the device queues and hand them to the
    // main loop. getOutputQueue() is called once, outside the loop; maxSize 4,
    // non-blocking, so older frames are dropped if the host falls behind.
    auto depthOut = device.getOutputQueue("depth", 4, false);
    auto depthFuture = std::async(std::launch::async, [&]() {
        while (running) {
            std::shared_ptr<dai::ADatatype> data = depthOut->get();
            if (auto imgFrame = std::dynamic_pointer_cast<dai::ImgFrame>(data)) {
                std::unique_lock<std::mutex> lock(depthqueueMtx);
                callbackType cb;
                cb.name = "depth";
                cb.frame = imgFrame->getCvFrame();
                depthqueue.push(cb);
            }
        }
    });
    auto rightOut = device.getOutputQueue("right", 4, false);
    auto rightFuture = std::async(std::launch::async, [&]() {
        while (running) {
            std::shared_ptr<dai::ADatatype> data = rightOut->get();
            if (auto imgFrame = std::dynamic_pointer_cast<dai::ImgFrame>(data)) {
                std::unique_lock<std::mutex> lock(rightqueueMtx);
                callbackType cb;
                cb.name = "right";
                cb.frame = imgFrame->getCvFrame();
                rightqueue.push(cb);
            }
        }
    });
    // Count how many frames of each stream arrive within 10 seconds.
    int depthCount = 0;
    int rightCount = 0;
    auto startTimer = std::chrono::high_resolution_clock::now();
    while (true) {
        cv::waitKey(1);  // no window is shown, so the returned key is unused

        callbackType depthData;
        {
            std::unique_lock<std::mutex> lock(depthqueueMtx);
            if (!depthqueue.empty()) {
                depthData = depthqueue.front();
                depthqueue.pop();
            }
        }
        callbackType rightData;
        {
            std::unique_lock<std::mutex> lock(rightqueueMtx);
            if (!rightqueue.empty()) {
                rightData = rightqueue.front();
                rightqueue.pop();
            }
        }
        if (!depthData.frame.empty()) {
            depthCount += 1;
        }
        if (!rightData.frame.empty()) {
            rightCount += 1;
        }

        auto endTimer = std::chrono::high_resolution_clock::now();
        auto durationTimer = std::chrono::duration_cast<std::chrono::seconds>(endTimer - startTimer);
        if (durationTimer.count() >= 10) {
            std::cout << "Depth: " << depthCount << " frames (~" << depthCount / durationTimer.count() << " FPS)" << std::endl;
            std::cout << "Right: " << rightCount << " frames (~" << rightCount / durationTimer.count() << " FPS)" << std::endl;
            running = false;  // let the reader threads exit their loops
            break;
        }
    }
    depthFuture.wait();
    rightFuture.wait();
    return 0;
}
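For reference, below is the direction I originally had in mind: letting DepthAI invoke a callback per message via DataOutputQueue::addCallback instead of polling from my own threads. This is only a sketch based on my reading of the depthai-core v2 API (the addCallback overload taking a stream name and a std::shared_ptr<dai::ADatatype>); I haven't confirmed whether it changes the frame rates. It would replace the two std::async readers and the counting loop above, and additionally needs #include <thread>:

    // Sketch: count frames with DepthAI's own queue callbacks instead of
    // dedicated reader threads. Uses the same `device` and pipeline as above.
    std::atomic<int> depthFrames{0};
    std::atomic<int> rightFrames{0};

    auto countFrame = [&](std::string name, std::shared_ptr<dai::ADatatype> data) {
        // Only count messages that are actually image frames.
        if (std::dynamic_pointer_cast<dai::ImgFrame>(data)) {
            (name == "depth" ? depthFrames : rightFrames)++;
        }
    };
    device.getOutputQueue("depth", 4, false)->addCallback(countFrame);
    device.getOutputQueue("right", 4, false)->addCallback(countFrame);

    // Let the callbacks run for 10 seconds, then report the totals.
    std::this_thread::sleep_for(std::chrono::seconds(10));
    std::cout << "Depth: " << depthFrames << "  Right: " << rightFrames << std::endl;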