I am having trouble visualizing the output and I don't know how to fix it.
import torch
import segmentation_models_pytorch as smp

# Build a DeepLabV3+ segmentation model with a MobileNetV3 encoder
# pre-trained on ImageNet.
# NOTE: the original line ended with a smart quote ('imagenet’) — that is a
# SyntaxError; use a plain ASCII quote.
model_smp_mv3 = smp.DeepLabV3Plus(
    encoder_name='timm-mobilenetv3_large_minimal_100',
    encoder_weights='imagenet',
)
model_smp_mv3.eval()  # export in inference mode

# Dummy input matching the shape used later in the blobconverter call
# (1 batch, 3 channels, 512x512).
dummy_input = torch.randn(1, 3, 512, 512)
save_filepath = "smp_mv3.onnx"

torch.onnx.export(
    model_smp_mv3,
    dummy_input,
    save_filepath,
    opset_version=12,
    do_constant_folding=False,
)
import blobconverter

# Convert the exported ONNX model to a MyriadX .blob for the OAK-D.
# NOTE: the original output_dir used a smart quote (“oakd_models/") — that is
# a SyntaxError; use plain ASCII quotes.
blob_path = blobconverter.from_onnx(
    model="smp_mv3.onnx",
    # FP16 is required by the MyriadX VPU; input shape must match the
    # dummy_input used for the ONNX export.
    optimizer_params=["--data_type=FP16", "--input_shape=[1,3,512,512]"],
    output_dir="oakd_models/",
    shaves=10,
)
with dai.Device(pipeline) as device:
    print("connected to the device.")
    dev_in = device.getInputQueue("input")
    dev_out = device.getOutputQueue("output", maxSize=1, blocking=False)

    # Build the input frame: planar RGB at the network's input resolution.
    # NOTE(review): RGB888p channel order must match what the model was
    # trained/exported with — if colors look wrong, the fix is usually
    # swapping to BGR here, not post-processing.
    frame = dai.ImgFrame()
    frame.setData(to_planar(img2, (nn_shape, nn_shape)))
    frame.setType(dai.RawImgFrame.Type.RGB888p)
    frame.setWidth(nn_shape)
    frame.setHeight(nn_shape)
    dev_in.send(frame)
    print(frame.getHeight(), frame.getWidth())

    data_o = dev_out.get()
    layers = data_o.getAllLayers()
    for layer_nr, layer in enumerate(layers):
        print(f"Layer {layer_nr}")
        print(f"Name: {layer.name}")
        print(f"Order: {layer.order}")
        print(f"dataType: {layer.dataType}")
        dims = layer.dims  # reverse dimensions
        # NOTE: the original f-string closed with a smart quote (”) — that is
        # a SyntaxError; use a plain ASCII quote.
        print(f"dims: {dims}")

    # BUG FIX: the original assigned `Layer2` (capital L) but then read
    # `layer2`, which raises NameError. Use one consistent name.
    raw_output = data_o.getLayerFp16(layers[0].name)
    layer2 = np.array(raw_output).reshape(dims)
    seg_output = layer2.squeeze(0)          # drop batch dim -> (C, H, W)
    seg_output = np.transpose(seg_output, (1, 2, 0))  # -> (H, W, C)
    # NOTE(review): for visualization, `seg_output` holds raw per-class
    # logits, not pixel intensities — take np.argmax(seg_output, axis=-1)
    # and map class indices to colors rather than scaling the logits.
Could you tell me how to get the correct visualization? Should I add scaling? Or convert to BGR?
Thank you