Sorry about that... here is the pipeline setup and the threads used. The problem of the detections and passthroughs not appearing to be synchronized happens with both MobileNets.
import threading
import time

import cv2
import depthai

# Helpers and globals referenced below (frameNorm, to_planar, fps, labels, offset,
# running, rec_results, frame_seq_map, etc.) are defined earlier in the full script.

pipeline = depthai.Pipeline()
cam_rgb = pipeline.create(depthai.node.ColorCamera) # create color camera object
cam_rgb.setPreviewSize(576, 576) # set camera preview size
cam_rgb.setInterleaved(False)
cam_rgb.initialControl.setManualFocus(110)
det_nn = pipeline.createMobileNetDetectionNetwork() # create tattoo detection mobilenet network
det_nn.setBlobPath("C:\\Luxonis\\DETECT_BLOBS\\Detect_2_17_2022.blob") # configure path to blob
det_nn.setConfidenceThreshold(0.5) # set confidence threshold
det_nn.input.setQueueSize(1)
det_nn.input.setBlocking(False)
manipRgb = pipeline.createImageManip() # ImageManip that rotates the preview 90 degrees
rgbRr = depthai.RotatedRect()
rgbRr.center.x, rgbRr.center.y = cam_rgb.getPreviewWidth() // 2, cam_rgb.getPreviewHeight() // 2
rgbRr.size.width, rgbRr.size.height = cam_rgb.getPreviewHeight(), cam_rgb.getPreviewWidth()
rgbRr.angle = 90
manipRgb.initialConfig.setCropRotatedRect(rgbRr, False)
cam_rgb.preview.link(manipRgb.inputImage)
manip = pipeline.createImageManip() # second ImageManip that resizes the rotated frame to the 300x300 NN input
manip.initialConfig.setResize(300, 300)
manip.initialConfig.setFrameType(depthai.RawImgFrame.Type.RGB888p)
manipRgb.out.link(manip.inputImage)
manip.out.link(det_nn.input)
cam_xout = pipeline.createXLinkOut() # rotated preview frames out to the host
cam_xout.setStreamName("cam_out")
manipRgb.out.link(cam_xout.input)
rec_nn = pipeline.createMobileNetDetectionNetwork() # create tattoo ocr mobilenet network
rec_nn.setBlobPath("C:\\Luxonis\\READ_BLOBS\\read_2_16_2022.blob") # configure path to blob
rec_nn.setConfidenceThreshold(0.4) # set confidence threshold
rec_nn.input.setQueueSize(1)
rec_nn.input.setBlocking(False)
rec_xin = pipeline.createXLinkIn() # host sends cropped detections back in for the OCR network
rec_xin.setStreamName("rec_in")
rec_xin.out.link(rec_nn.input)
det_nn_xout = pipeline.createXLinkOut()
det_nn_xout.setStreamName("det_nn")
det_nn.out.link(det_nn_xout.input)
det_pass = pipeline.createXLinkOut()
det_pass.setStreamName("det_pass")
det_nn.passthrough.link(det_pass.input)
rec_xout = pipeline.createXLinkOut()
rec_xout.setStreamName("rec_nn")
rec_nn.out.link(rec_xout.input)
rec_pass = pipeline.createXLinkOut()
rec_pass.setStreamName("rec_pass")
rec_nn.passthrough.link(rec_pass.input)

def detect_thread(det_queue, det_pass, rec_queue):
    global tattoo_detections, tat_last_seq, tat_last_img
    while running:
        try:
            # Get tattoo detections and the corresponding passthrough frame
            in_det = det_queue.get().detections
            in_pass = det_pass.get()
            # Look up the original rotated preview frame by sequence number
            orig_frame = frame_seq_map.get(in_pass.getSequenceNum(), None)
            tat_last_img = orig_frame
            if orig_frame is None:
                continue
            tat_last_seq = in_pass.getSequenceNum()
            tattoo_detections = in_det
            for detection in tattoo_detections:
                bbox = frameNorm(orig_frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                cropped_frame = orig_frame[bbox[1] - offset:bbox[3] + offset, bbox[0] - offset:bbox[2] + offset]
                shape = cropped_frame.shape
                if shape[0] > 0 and shape[1] > 0:
                    # Send the cropped detection back to the device for the OCR network
                    tstamp = time.monotonic()
                    img = depthai.ImgFrame()
                    img.setTimestamp(tstamp)
                    img.setType(depthai.RawImgFrame.Type.BGR888p)
                    img.setData(to_planar(cropped_frame, (300, 300)))
                    img.setWidth(300)
                    img.setHeight(300)
                    rec_queue.send(img)
            fps.tick('detect')
        except RuntimeError:
            continue

def rec_thread(q_rec, q_pass):
    global rec_results, decoded_text
    while running:
        try:
            # Get character detections and the passthrough (cropped tattoo) frame from the OCR nn
            rec_data = q_rec.get().detections
            in_pass = q_pass.get()  # read the passthrough message once so frame and sequence number match
            rec_frame = in_pass.getCvFrame()
            seq = in_pass.getSequenceNum()
            char_detections = [detection for detection in rec_data]
        except RuntimeError:
            continue
        decoded_text = ''
        # Draw each character detection's box and label on the cropped frame
        for detection in rec_data:
            bbox = frameNorm(rec_frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.rectangle(rec_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (205, 0, 0), 2)
            cv2.putText(rec_frame, '{} ({}%)'.format(labels[detection.label], int(detection.confidence * 100)), (bbox[0] - 10, bbox[1] - 20), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (205, 0, 0))
        # Create result image to stack
        rec_results = [(cv2.resize(rec_frame, (300, 300)), decoded_text)] + rec_results[:9]
        fps.tick('OCR')

with depthai.Device(pipeline) as device:
    cam_out = device.getOutputQueue("cam_out", 1, True)
    rec_in = device.getInputQueue("rec_in")
    det_nn = device.getOutputQueue("det_nn", 1, False)
    det_pass = device.getOutputQueue("det_pass", 1, False)
    rec_nn = device.getOutputQueue("rec_nn", 1, False)
    rec_pass = device.getOutputQueue("rec_pass", 1, False)

    det_t = threading.Thread(target=detect_thread, args=(det_nn, det_pass, rec_in))
    det_t.start()
    rec_t = threading.Thread(target=rec_thread, args=(rec_nn, rec_pass))
    rec_t.start()
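
For reference, frame_seq_map gets filled in the main display loop, which isn't pasted above; a trimmed-down sketch of that part (not the exact code; the overlay drawing and quit handling are simplified) looks like this:

# Runs inside the "with depthai.Device(pipeline) as device:" block after the
# threads are started: store each rotated preview frame under its sequence
# number so detect_thread can look up the frame that matches a passthrough.
while running:
    in_frame = cam_out.get()                            # blocking "cam_out" queue
    frame = in_frame.getCvFrame()
    frame_seq_map[in_frame.getSequenceNum()] = frame
    # drop frames older than the last sequence number detect_thread consumed
    for seq in list(frame_seq_map.keys()):
        if seq < tat_last_seq:
            del frame_seq_map[seq]
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) == ord('q'):
        running = False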