Hi @jakob, I was able to export the model, and it gave me an ONNX model and a config JSON.
From the ONNX I converted to a blob using https://blobconverter.luxonis.com/,
but the model rarely detects anything (maybe I need to grow the dataset; it is currently 250 images).
I have attached the .pt, ONNX, and converted blob models for reference.
Also, when I followed this example (https://github.com/luxonis/depthai-python/blob/main/examples/Yolo/tiny_yolo.py) to test the converted YOLOv5 blob, the OAK-1 crashes. My test script and the crash log are below.
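For reproducibility, I was also thinking of scripting the conversion with the blobconverter pip package instead of the web UI. This is only a sketch of what I had in mind (untested; the shave count and OpenVINO version are guesses on my part):
'
import blobconverter

# Compile the exported ONNX into a Myriad X .blob.
# NOTE: shaves=6 and version="2021.4" are assumptions, not verified values.
blob_path = blobconverter.from_onnx(
    model="correct.onnx",  # path to the exported ONNX model
    data_type="FP16",      # Myriad X inference runs in FP16
    shaves=6,              # SHAVE cores to compile for (guess)
    version="2021.4",      # OpenVINO version (guess)
)
print("Compiled blob saved to:", blob_path)
'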
'
import cv2
import depthai as dai
import numpy as np

# Configuration
MODEL_PATH = 'blobs/correct.blob'
ANCHORS = [
    10.0, 13.0, 16.0, 30.0, 33.0, 23.0,
    30.0, 61.0, 62.0, 45.0, 59.0, 119.0,
    116.0, 90.0, 156.0, 198.0, 373.0, 326.0
]
ANCHOR_MASKS = {
    "side52": [0, 1, 2],
    "side26": [3, 4, 5],
    "side13": [6, 7, 8]
}
CONF_THRESHOLD = 0.3
IOU_THRESHOLD = 0.2
NUM_CLASSES = 1
VIDEO_SIZE = (416, 416)
SENSOR_RES = dai.ColorCameraProperties.SensorResolution.THE_4_K
ISP_SCALE = (1, 1)
FPS = 30
DEVICE_MAC = "1844301091F11CF500"  # device MXID (not a MAC address)


def frame_norm(frame, bbox):
    """Scale normalized YOLO bbox coords to pixel coords."""
    h, w = frame.shape[:2]
    return (
        int(bbox.xmin * w), int(bbox.ymin * h),
        int(bbox.xmax * w), int(bbox.ymax * h)
    )


def create_pipeline():
    pipeline = dai.Pipeline()

    # Color camera
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setResolution(SENSOR_RES)
    cam.setIspScale(*ISP_SCALE)
    cam.setVideoSize(*VIDEO_SIZE)
    cam.setInterleaved(False)
    cam.setPreviewSize(416, 416)
    cam.setPreviewKeepAspectRatio(False)
    cam.setFps(FPS)

    # YOLO detection network
    nn = pipeline.create(dai.node.YoloDetectionNetwork)
    nn.setBlobPath(MODEL_PATH)
    nn.setConfidenceThreshold(CONF_THRESHOLD)
    nn.setNumClasses(NUM_CLASSES)
    nn.setIouThreshold(IOU_THRESHOLD)
    nn.setAnchors(ANCHORS)
    nn.setAnchorMasks(ANCHOR_MASKS)
    nn.setCoordinateSize(4)
    nn.setNumInferenceThreads(2)
    nn.input.setBlocking(False)

    # Outputs
    xout_cam = pipeline.create(dai.node.XLinkOut)
    xout_nn = pipeline.create(dai.node.XLinkOut)
    xout_pass = pipeline.create(dai.node.XLinkOut)
    xout_cam.setStreamName("rgb")
    xout_nn.setStreamName("detections")
    xout_pass.setStreamName("pass")

    # Link nodes
    cam.preview.link(nn.input)
    nn.out.link(xout_nn.input)
    nn.passthrough.link(xout_pass.input)
    if True:  # toggle: use NN passthrough so frames stay in sync with detections
        nn.passthrough.link(xout_cam.input)
    else:
        cam.preview.link(xout_cam.input)

    return pipeline


def main():
    pipeline = create_pipeline()
    with dai.Device(pipeline, dai.DeviceInfo(DEVICE_MAC)) as device:
        q_rgb = device.getOutputQueue("rgb", maxSize=4, blocking=False)
        q_det = device.getOutputQueue("detections", maxSize=4, blocking=False)
        while True:
            in_rgb = q_rgb.get()     # blocking
            in_det = q_det.tryGet()  # non-blocking
            frame = in_rgb.getCvFrame()

            # Draw detections
            if in_det:
                for det in in_det.detections:
                    x1, y1, x2, y2 = frame_norm(frame, det)
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

            cv2.imshow("YOLO", frame)
            if cv2.waitKey(1) == ord('q'):
                break
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
'
with dai.Device(pipeline, dai.DeviceInfo(DEVICE_MAC)) as device:
Stack trace (most recent call last):
#31 Object "", at 00007FFD3AED99B8, in PyInit_depthai
#30 Object "", at 00007FFD3AED908B, in PyInit_depthai
#29 Object "", at 00007FFD3AEDD8DD, in PyInit_depthai
#28 Object "", at 00007FFD3AED68FE, in PyInit_depthai
#27 Object "", at 00007FFD3AEA3344, in PyInit_depthai
#26 Object "", at 00007FFD3AEB9CC1, in PyInit_depthai
#25 Object "", at 00007FFD3AEB82AB, in PyInit_depthai
#24 Object "", at 00007FFE28F73C66, in RtlCaptureContext2
#23 Object "", at 00007FFD3B04F86E, in PyInit_depthai
#22 Object "", at 00007FFD3B058B40, in PyInit_depthai
#21 Object "", at 00007FFD3B0928B4, in PyInit_depthai
#20 Object "", at 00007FFD3B04CB8F, in PyInit_depthai
#19 Object "", at 00007FFE262B565C, in RaiseException
#18 Object "", at 00007FFE28F24475, in RtlRaiseException
#17 Object "", at 00007FFE28EEE466, in RtlFindCharInUnicodeString
#16 Object "", at 00007FFE28F7441F, in _chkstk
#15 Object "", at 00007FFD3B04BE25, in PyInit_depthai
#14 Object "", at 00007FFD3B04F3F1, in PyInit_depthai
#13 Object "", at 00007FFD3B04F38C, in PyInit_depthai
#12 Object "", at 00007FFD3B04E581, in PyInit_depthai
#11 Object "", at 00007FFD3B04DD09, in PyInit_depthai
#10 Object "", at 00007FFD3B04BA32, in PyInit_depthai
#9 Object "", at 00007FFE28EEFD54, in RtlUnwindEx
#8 Object "", at 00007FFE28F7449F, in _chkstk
#7 Object "", at 00007FFD3B04A84C, in PyInit_depthai
#6 Object "", at 00007FFD3B04BE25, in PyInit_depthai
#5 Object "", at 00007FFD3B04F3F1, in PyInit_depthai
#4 Object "", at 00007FFD3B04F2B9, in PyInit_depthai
#3 Object "", at 00007FFD3B04B489, in PyInit_depthai
#2 Object "", at 00007FFD3B050017, in PyInit_depthai
#1 Object "", at 00007FFD3B058BAA, in PyInit_depthai
#0 Object "", at 00007FFD3AEB72F4, in PyInit_depthai
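Since the crash happens right at the dai.Device(...) call, one small check I plan to run first (assuming I am reading the depthai API correctly) is to confirm the MXID I hardcoded is actually visible:
'
import depthai as dai

# List every OAK device XLink can currently see, with its MXID and state.
for info in dai.Device.getAllAvailableDevices():
    print(f"MXID: {info.getMxId()}  state: {info.state}")
'
If "1844301091F11CF500" does not show up in that list, the DeviceInfo lookup would be my first suspect.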
Could you give me an updated or fixed version of the code so I can test the blob model before running it directly on the Android device?
Also, let me know if I am doing something wrong.
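One thing I suspect I might be getting wrong is hardcoding the anchors/masks/thresholds instead of reading them from the config JSON that the export produced. Assuming the JSON has the usual nn_config / NN_specific_metadata layout (I have not double-checked the exact key names), I would load it like this and call it right after creating the node in create_pipeline():
'
import json

def apply_yolo_metadata(nn, config_path):
    """Configure a YoloDetectionNetwork node from the exported config JSON.

    The key names below are assumptions based on typical Luxonis exports.
    """
    with open(config_path) as f:
        meta = json.load(f)["nn_config"]["NN_specific_metadata"]
    nn.setNumClasses(meta["classes"])
    nn.setCoordinateSize(meta["coordinates"])
    nn.setAnchors(meta["anchors"])
    nn.setAnchorMasks(meta["anchor_masks"])
    nn.setIouThreshold(meta["iou_threshold"])
    nn.setConfidenceThreshold(meta["confidence_threshold"])
'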
Note: the existing model works fine from the Android code repo, and at one point I did get some detections from the custom model that I built the way described above.
Thanks
Attachments: blob, config JSON, ONNX, .pt