Hi,
I am working on a project using the OAK-D camera and facing an issue with linking nodes in the pipeline. My goal is to classify the key points detected by the camera into different yoga poses using a custom neural network. Specifically, I'm trying to pass the output of a landmark detection neural network into my pose classification neural network as its input.
The issue is that when I use `pc_nn.out.link(manager_script.inputs['from_pc_nn'])` to link the pose classification output to the manager_script input, the pipeline starts running and a new window opens showing the key points being detected. However, a few seconds later the program freezes and I never get a classification for the key points.
If I remove that `pc_nn.out.link(manager_script.inputs['from_pc_nn'])` link, the pipeline works fine (the landmarks are detected and drawn).
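I suspect the problem might be on the Script-node side, so for context this is roughly how I understand such an input would be consumed inside the script built by `build_manager_script()` (a simplified sketch of my understanding, not my actual script):

```
# Sketch of the on-device Script node code (runs as MicroPython on the device).
# An input linked with manager_script.inputs['from_pc_nn'] should be readable via node.io:
while True:
    # blocks until the pose classification NN produces a result
    pc_result = node.io['from_pc_nn'].get()
    # ... decode the result and forward it to the host through node.io['host'] ...
```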
Here is the code of the pipeline:
```
def create_pipeline(self):
    print("Creating pipeline...")
    # Start defining a pipeline
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_4)
    self.pd_input_length = 224
    self.lm_input_length = 256
    self.pc_input_length = 66

    # ColorCamera
    print("Creating Color Camera...")
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setInterleaved(False)
    cam.setIspScale(self.scale_nd[0], self.scale_nd[1])
    cam.setFps(self.internal_fps)
    cam.setBoardSocket(dai.CameraBoardSocket.RGB)
    if self.crop:
        cam.setVideoSize(self.frame_size, self.frame_size)
        cam.setPreviewSize(self.frame_size, self.frame_size)
    else:
        cam.setVideoSize(self.img_w, self.img_h)
        cam.setPreviewSize(self.img_w, self.img_h)

    if not self.laconic:
        cam_out = pipeline.create(dai.node.XLinkOut)
        cam_out.setStreamName("cam_out")
        cam_out.input.setQueueSize(1)
        cam_out.input.setBlocking(False)
        cam.video.link(cam_out.input)

    # Define manager script node
    manager_script = pipeline.create(dai.node.Script)
    manager_script.setScript(self.build_manager_script())

    # Define pose classification pre-processing
    print("Creating Pose Classification pre processing image manip...")
    pre_pc_manip = pipeline.create(dai.node.ImageManip)
    pre_pc_manip.setMaxOutputFrameSize(self.pc_input_length*self.pc_input_length*3)
    pre_pc_manip.setWaitForConfigInput(True)
    pre_pc_manip.inputImage.setQueueSize(1)
    pre_pc_manip.inputImage.setBlocking(False)
    cam.preview.link(pre_pc_manip.inputImage)
    manager_script.outputs['pre_pd_manip_cfg'].link(pre_pd_manip.inputConfig)

    # Define link to send result to host
    manager_out = pipeline.create(dai.node.XLinkOut)
    manager_out.setStreamName("manager_out")
    manager_script.outputs['host'].link(manager_out.input)

    # Define landmark pre-processing image manip
    print("Creating Landmark pre processing image manip...")
    pre_lm_manip = pipeline.create(dai.node.ImageManip)
    pre_lm_manip.setMaxOutputFrameSize(self.lm_input_length*self.lm_input_length*3)
    pre_lm_manip.setWaitForConfigInput(True)
    pre_lm_manip.inputImage.setQueueSize(1)
    pre_lm_manip.inputImage.setBlocking(False)
    cam.preview.link(pre_lm_manip.inputImage)
    manager_script.outputs['pre_lm_manip_cfg'].link(pre_lm_manip.inputConfig)

    print("Creating DivideBy255 Neural Network...")
    divide_nn = pipeline.create(dai.node.NeuralNetwork)
    divide_nn.setBlobPath(self.divide_by_255_model)
    pre_lm_manip.out.link(divide_nn.input)

    # Define landmark model
    print("Creating Landmark Neural Network...")
    lm_nn = pipeline.create(dai.node.NeuralNetwork)
    lm_nn.setBlobPath(self.lm_model)
    # lm_nn.setNumInferenceThreads(1)
    divide_nn.out.link(lm_nn.input)
    lm_nn.out.link(manager_script.inputs['from_lm_nn'])

    # Define pose classification model
    print("Creating Pose Classify Neural Network...")
    pc_nn = pipeline.create(dai.node.NeuralNetwork)
    pc_nn.setBlobPath(self.pc_model)
    pre_pc_manip.out.link(pc_nn.input)
    lm_nn.out.link(pc_nn.input)
    pc_nn.out.link(manager_script.inputs['from_pc_nn'])

    print("Pipeline created.")
    return pipeline
```
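One thing I am unsure about is whether the Script input needs explicit queue settings so it does not fill up and block. From the API I think it could be configured like this (just an assumption on my side, I haven't verified that it makes any difference):

```
# Possible queue configuration for the Script input (untested assumption):
manager_script.inputs['from_pc_nn'].setBlocking(False)
manager_script.inputs['from_pc_nn'].setQueueSize(1)
pc_nn.out.link(manager_script.inputs['from_pc_nn'])
```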
Again, without that link the pipeline runs fine and just detects and draws the key points, but I need to classify those key points to identify the yoga poses.
Could someone please help me understand what might be causing this issue and how I can modify my code to link the output of my custom node to the input of the manager script?
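To make the data flow I am aiming for explicit, this is the topology I have in mind at the pipeline level (the `to_pc_nn` output name is just a placeholder; it does not exist in my script yet):

```
# Intended data flow (sketch only; 'to_pc_nn' is a placeholder output name):
lm_nn.out.link(manager_script.inputs['from_lm_nn'])   # landmarks -> manager script
manager_script.outputs['to_pc_nn'].link(pc_nn.input)  # manager script -> pose classifier
pc_nn.out.link(manager_script.inputs['from_pc_nn'])   # pose classifier -> manager script
```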
The snippet below is where the host captures the information from the manager_script node. I need to classify the detected landmarks/key points into specific yoga poses, so I'm trying to pass the landmark output through the manager script and feed it into the pose classification network. However, I can't make this work, and I'm not sure what changes I need to make to my node so that it can send data to the manager script, which would in turn feed the pose classification network.
```
def next_frame(self):
    self.fps.update()
    if self.laconic:
        video_frame = np.zeros((self.frame_size, self.frame_size, 3), dtype=np.uint8)
    else:
        in_video = self.q_video.get()
        video_frame = in_video.getCvFrame()

    # Get result from device
    # <- here the information from manager_script is captured and used to draw the landmarks
    res = marshal.loads(self.q_manager_out.get().getData())
    if res["type"] != 0 and res["lm_score"] > self.lm_score_thresh:
        body = mpu.Body()
        body.rect_x_center_a = res["rect_center_x"] * self.frame_size
        body.rect_y_center_a = res["rect_center_y"] * self.frame_size
        body.rect_w_a = body.rect_h_a = res["rect_size"] * self.frame_size
        body.rotation = res["rotation"]
        body.rect_points = mpu.rotated_rect_to_points(body.rect_x_center_a, body.rect_y_center_a, body.rect_w_a, body.rect_h_a, body.rotation)
        body.lm_score = res["lm_score"]
        self.lm_postprocess(body, res['lms'], res['lms_world'])
        if self.xyz:
            if res['xyz_ref'] == 0:
                body.xyz_ref = None
            else:
                if res['xyz_ref'] == 1:
                    body.xyz_ref = "mid_hips"
                else:  # res['xyz_ref'] == 2
                    body.xyz_ref = "mid_shoulders"
                body.xyz = np.array(res["xyz"])
                if self.smoothing:
                    body.xyz = self.filter_xyz.apply(body.xyz)
                body.xyz_zone = np.array(res["xyz_zone"])
                body.xyz_ref_coords_pixel = np.mean(body.xyz_zone.reshape((2,2)), axis=0)
```
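Eventually I expect to read the classification result from the same manager_out message, along these lines (the "pose_class" key is purely hypothetical; my script does not send it yet):

```
# Hypothetical host-side handling once the manager script forwards the classifier output
# (the "pose_class" key is a placeholder, not something my script currently sends):
if "pose_class" in res:
    body.pose_class = res["pose_class"]
    print("Detected yoga pose:", body.pose_class)
```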
Thanks for your help in advance!