Here is my code:
#include "StereoCamera.hpp"
// Opens the log file and starts camera initialization on a background thread.
// If the log file cannot be opened, initialization is skipped entirely and
// the camera never starts.
StereoCamera::StereoCamera() {
logFile.open("log/StereoCamera.txt");
if (!logFile.is_open()) {
// Error text (Korean): "Cannot open the log file."
std::cerr << "로그 파일을 열 수 없습니다." << std::endl;
return;
}
// NOTE(review): raw `new` + detach — the std::thread object is never deleted
// (leak), and the detached thread can outlive this object while touching its
// members. Prefer a joinable member thread joined in the destructor.
// Also: if the early return above is taken, initThreadPtr is presumably left
// uninitialized unless the header gives it a default — TODO confirm.
initThreadPtr = new std::thread(&StereoCamera::initCamera, this);
initThreadPtr->detach();
}
// NOTE(review): intentionally empty. logFile is closed by the ofstream
// destructor (RAII), but the std::thread allocated in the constructor is
// never deleted, and the detached init thread may still be running and
// accessing members of this object when it is destroyed — TODO: confirm
// shutdown ordering / add a stop flag before relying on teardown.
StereoCamera::~StereoCamera() {
}
void StereoCamera::initCamera() {
logFile << __func__ << std::endl;
pipeline = std::make_shared<dai::Pipeline>();
device = std::make_shared<dai::Device>();
camRGB = pipeline->create<dai::node::ColorCamera>();
camLeft = pipeline->create<dai::node::MonoCamera>();
camRight = pipeline->create<dai::node::MonoCamera>();
stereo = pipeline->create<dai::node::StereoDepth>();
camRGB->setBoardSocket(dai::CameraBoardSocket::CAM_A);
camRGB->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
camRGB->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
try {
auto calibData = device->readCalibration2();
auto lensPosition = calibData.getLensPosition(dai::CameraBoardSocket::CAM_A);
if (lensPosition){
camRGB->initialControl.setManualFocus(lensPosition);
}
} catch (const std::exception& e) {
logFile << __func__ << e.what() << std::endl;
}
camLeft->setBoardSocket(dai::CameraBoardSocket::CAM_B);
camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
camRight->setBoardSocket(dai::CameraBoardSocket::CAM_C);
camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::HIGH_DENSITY);
stereo->setLeftRightCheck(true);
stereo->setSubpixel(true);
stereo->initialConfig.setMedianFilter(dai::MedianFilter::KERNEL_7x7);
stereo->setDepthAlign(dai::CameraBoardSocket::CAM_A);
stereo->setOutputSize(camLeft->getResolutionWidth(), camLeft->getResolutionHeight());
xoutRGB = pipeline->create<dai::node::XLinkOut>();
xoutDisp = pipeline->create<dai::node::XLinkOut>();
xoutDepth = pipeline->create<dai::node::XLinkOut>();
xoutRGB->setStreamName("rgb");
xoutDisp->setStreamName("disp");
xoutDepth->setStreamName("depth");
camRGB->isp.link(xoutRGB->input);
camLeft->out.link(stereo->left);
camRight->out.link(stereo->right);
stereo->disparity.link(xoutDisp->input);
stereo->depth.link(xoutDepth->input);
nn = pipeline->create<dai::node::MobileNetDetectionNetwork>();
nn->setConfidenceThreshold((float)0.5);
nn->setBlobPath(nnBlobPath);
nn->setNumInferenceThreads(2);
// nn->input.setBlocking(false);
xinNN = pipeline->create<dai::node::XLinkIn>();
xinNN->setStreamName("nn_input");
xinNN->out.link(nn->input);
xoutNN = pipeline->create<dai::node::XLinkOut>();
xoutNN->setStreamName("nn");
nn->out.link(xoutNN->input);
device->startPipeline(*pipeline);
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
auto rgbQueue = device->getOutputQueue("rgb", 4, false);
auto dispQueue = device->getOutputQueue("disp", 4, false);
auto depthQueue = device->getOutputQueue("depth", 4, false);
auto nninputQueue = device->getInputQueue("nn_input");
auto nnQueue = device->getOutputQueue("nn", 4, false);
cv::Mat rgbFrame;
cv::Mat rgbFrameResized;
while (true){
try{
auto ImgFrame = rgbQueue->tryGet<dai::ImgFrame>();
// auto ImgFrame = rgbQueue->get<dai::ImgFrame>();
if (ImgFrame != nullptr){
rgbFrame = ImgFrame->getCvFrame();
cv::Mat lic_frame_data(300, 300, CV_8UC3);
cv::resize(rgbFrame, lic_frame_data, cv::Size(300, 300));
//transpose, flatten (lic_frame_data)
cv::Mat transposed = lic_frame_data.reshape(1, 300 * 300 * 3);
std::vector<uchar> flattened;
transposed.copyTo(flattened);
dai::ImgFrame lic_frame;
lic_frame.setData(flattened);
lic_frame.setTimestamp(std::chrono::steady_clock::now());
lic_frame.setType(dai::RawImgFrame::Type::BGR888p);
lic_frame.setWidth(300);
lic_frame.setHeight(300);
nninputQueue->send(lic_frame);
auto inDet = nnQueue->get<dai::ImgDetections>();
auto detections = inDet->detections;
std::cout << "detections size: " << detections.size() << std::endl;
cv::resize(rgbFrame, rgbFrameResized, cv::Size(WIN_W, WIN_H));
cv::cvtColor(rgbFrameResized, rgbFrameResized, cv::COLOR_BGR2RGB);
std::unique_lock<std::mutex> lock(frameMutex);
currentFrame = rgbFrameResized.clone();
frameReady = true;
lock.unlock();
}
std::this_thread::sleep_for(std::chrono::milliseconds(1));
} catch (const std::exception& e) {
std::cerr << __func__ << e.what() << std::endl;
logFile << __func__ << e.what() << std::endl;
}
}
}
// Returns a copy of the most recent frame published by the capture loop, or
// the previously returned frame (possibly empty, before the first capture)
// when no new frame exists. Clears the frame-ready flag.
cv::Mat
StereoCamera::getCurrentFrame() {
    try {
        std::lock_guard<std::mutex> lock(frameMutex);
        if (!currentFrame.empty()) {
            returnFrame = currentFrame.clone();
        }
        // Clear the flag while still holding the lock. The original reset it
        // after unlocking, racing the capture thread setting it for a newer
        // frame — which could silently mark a fresh frame as consumed.
        frameReady = false;
    } catch (const std::exception& e) {
        std::cerr << __func__ << e.what() << std::endl;
        logFile << __func__ << e.what() << std::endl;
    }
    return returnFrame;
}
bool
StereoCamera::isFrameReady() {
return frameReady;
}
Is there anything wrong with this configuration?
The error message is:
initCameraCommunication exception - possible device error/misconfiguration. Original message 'Couldn't read data from stream: 'rgb' (X_LINK_ERROR)'
What should I do?
I want to run NN detection on the ISP output with depth alignment.
It works fine for roughly 2–3 minutes, but after that the error appears.
Thanks for your help.