Hi jakaskerl,
Can you please provide me an example, when I try to setSize for ImgFrame I get error about wrong input params. Also i found this approach https://discuss.luxonis.com/d/331-send-array-to-device-using-xlink/2, but I get this error Input tensor 'nchw_input' (0) exceeds available data range. Data size (1228800B), tensor offset (0), size (2457600B) - skipping inference
Code example:
# Create pipeline
pipeline = dai.Pipeline()
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(640, 640)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
camRgb.setFp16(True) # Model requires FP16 input
# NN that detects faces in the image
nn = pipeline.create(dai.node.NeuralNetwork)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(True)
xinArray = pipeline.createXLinkIn()
nnOut = pipeline.createXLinkOut()
xinArray.setStreamName("inArray")
nnOut.setStreamName("nn")
xinArray.out.link(nn.input)
nn.out.link(nnOut.input)
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
qIn = device.getInputQueue(name="inArray", maxSize=4, blocking=False)
qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
detections = []
# Load and preprocess the image
image = cv2.imread(image_path)
image = cv2.resize(image, (640, 640)) # Resize to model's input size
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image = np.expand_dims(image, axis=0)
image = np.reshape(image, (1,3,640,640)).astype(dtype=np.float16)
image = image.flatten()
print(image.dtype)
data = dai.NNData()
data.setLayer("nchw_input", image.data)
while True:
qIn.send(data)
in_nn = qDet.tryGet()
if in_nn is not None:
# [print(f"Layer name: {l.name}, Type: {l.dataType}, Dimensions: {l.dims}") for l in inDet.getAllLayers()]
# Extract the output shape: (batch_size, channels, num_predictions)
boxes = np.array(in_nn.getLayerFp16('box')).reshape(1, 8400, 64).astype(dtype=np.float32)
classes = np.array(in_nn.getLayerFp16('class')).reshape(1, 8400, 3).astype(dtype=np.float32)
detections=[]
# print(classes)
result=decode_predictions(boxes, classes, image)
# print(result)
result_boxes=result["boxes"]
num_of_dects=result["num_detections"]
print("num_of_dects")
print(num_of_dects)
if result_boxes[0][0][0] !=-1.0:
detection = {
"label": 1,
"confidence": 0.1,
"box": result_boxes[0][0]}
detections.append(detection)
res = session.run(
output_names=[output_name0, output_name1],
input_feed={input_name: image}
)
result=decode_predictions(res[0], res[1], image)
# print(result)
result_boxes=result["boxes"]
num_of_dects=result["num_detections"]
print("num_of_dects onnx")
print(num_of_dects)
# cv2.imshow("rgb", frame)
if cv2.waitKey(1) == ord('q'):
break
Best regards,
Aleks