Hey there, here's what I have. I know there are several other scripts connected to this one, and I don't fully understand how it works since I'm not the one who wrote it. If you need anything else, just tell me and I'll see what I can do.
As an update, here's what I observe: every minute or so, the Python script "inference_oneFile_debug.py" is launched (and for some reason there are almost always 3 or 4 instances started at nearly the same time). As soon as one of the instances terminates (i.e. I see it disappear from htop), the connection to the camera (as seen with the ping command) dies for a few seconds, then comes back. After a while, though, it stops coming back and I have to unplug and replug the camera.
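In case it helps with testing, here's a minimal sketch of a single-instance guard that could confirm whether the overlapping instances are what kills the link (my own sketch, assuming Linux and the standard fcntl module; the lock file path is made up):

# Single-instance guard (sketch, Linux only): put this at the top of
# inference_oneFile_debug.py, before the pipeline is created.
import fcntl
import sys

lock_file = open("/tmp/inference_oneFile_debug.lock", "w")  # hypothetical path
try:
    # Non-blocking exclusive lock: raises immediately if another instance
    # of the script already holds it.
    fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
    print("Another instance is already using the camera, exiting.")
    sys.exit(1)
# The lock is released automatically when the process exits.

With this in place, the 3-4 overlapping launches would reduce to one; if the camera link then stops dying, the overlap is the likely culprit.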
Here's the code inside the script "inference_oneFile_debug.py":
from pathlib import Path
import cv2
import depthai
# import blobconverter
import numpy as np
import time
import argparse
import copy
import pickle
from image_manip import *
import project_parser
from collections import Counter
"""
THIS IS A MODIFICATION OF INFERENCE_ONEFRAME.PY
ESSENTIALLY, THE FEATURE THAT KEPT THE FRAME WITH THE MOST COMMON NUMBER OF DETECTIONS WAS BROKEN.
NOW WE ONLY KEEP THE FRAME WITH THE LEAST BLUR.
The script records a video stream with the camera and keeps a single frame.
INPUT:
Several command-line arguments (organized below with the parser) affect detection
success; they must be tuned for the particular situation.
Don't pass "-sho" to show output on the Odroid; it won't work there.
OUTPUT:
- One raw frame (...raw.jpg)
- Location/size of the detection boxes, if any (...detections.csv)
"""
"""
Possible new features
- Data analysis: vary the confidence threshold to find an optimal level
"""
# ==============================================================================
# Parameters
# ==============================================================================
HOME_PATH = str(Path.home())
# Extract command-line arguments
# e.g. running python3 inference_oneFile_debug.py -fps 18 sets FPS = 18
args = project_parser.project_parser(argparse.ArgumentParser())
FPS = args.frames_per_second
STREAM_DURATION = args.stream_duration
EARLY_STOP = 0.75  # hardcoded; defined but never used in this script
LENS_FOCUS = args.lens_focus
OUTPUT_PATH = args.output_path
IMG_QUALITY = args.image_quality
CONF = args.confidence_threshold
BLUR_THRESHOLD = args.blur_threshold
RES = args.resolution
LUM_THRESHOLD = 230  # luminance cutoff for rejecting over-exposed frames (hardcoded; presumably a 0-255 scale)
# A given blob expects a specific input width (WI) and height (HE) for the frames it receives
BLOB_NAME, WI, HE = "pedestrian-detection-adas-0002", 672, 384
# BLOB_NAME, WI, HE, FPS = 'mobilenet-ssd', 300, 300, 30
class streamHistory:
    # Accumulates per-frame data: detection counts, blur/luminance scores,
    # the frames themselves, and the raw detection lists.
    def __init__(
        self,
        detection_number_init=None,
        detections_list_init=None,
        blur_score_init=None,
        lum_score_init=None,
        frame_init=None,
    ):
        # Default to None instead of [] because mutable default arguments
        # are shared across all instances in Python.
        self.detections_numbers = detection_number_init if detection_number_init is not None else []
        self.blur_scores = blur_score_init if blur_score_init is not None else []
        self.lum_scores = lum_score_init if lum_score_init is not None else []
        self.frames = frame_init if frame_init is not None else []
        self.detections_lists = detections_list_init if detections_list_init is not None else []
    def update(self, frame, detections_list):
        self.detections_numbers.append(len(detections_list))
        self.blur_scores.append(getBlurScore(frame))  # sharpness score from image_manip
        self.lum_scores.append(getLumScore(frame))  # luminance score from image_manip
        self.frames.append(frame)
        self.detections_lists.append(detections_list)
def Most_Common(lst):
data = Counter(lst)
print(data)
return data.most_common(1)[0][0]
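# e.g. Most_Common([2, 2, 3, 2]) returns 2; only referenced by the disabled selection logic below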
# ==============================================================================
# Setting color camera and neural network
# ==============================================================================
# Pipeline tells DepthAI what operations to perform when running
pipeline = depthai.Pipeline()
# Initialize color cam
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(WI, HE)
cam_rgb.setInterleaved(False)
cam_rgb.setFps(FPS)
# Choose resolution
if RES == 1080:
cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
N_SHAVES = 6
elif RES == 2160:
cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_4_K)
N_SHAVES = 5
elif RES == 3040:
cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_12_MP)
N_SHAVES = 5
else:
print("Input resolution is invalid: Setting 1080P")
cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
N_SHAVES = 6
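# (Fewer SHAVE cores are allotted to the NN at the higher sensor resolutions,
# presumably because more resources are needed by the camera/ISP pipeline.)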
# Depending on how the camera is set up, we might need to flip the image
# cam_rgb.setImageOrientation(depthai.CameraImageOrientation.HORIZONTAL_MIRROR)
# cam_rgb.setImageOrientation(depthai.CameraImageOrientation.VERTICAL_FLIP)
if args.flip:
cam_rgb.setImageOrientation(depthai.CameraImageOrientation.HORIZONTAL_MIRROR)
# Initialize Neural network (which model to choose, confidence_threshold, etc.)
detection_nn = pipeline.createMobileNetDetectionNetwork()
# detection_nn.setBlobPath(str(blobconverter.from_zoo(name=BLOB_NAME, shaves=N_SHAVES)))
detection_nn.setBlobPath(
    f"{HOME_PATH}/projets/Camera-PMR-2/resources/blobs/"
    f"{BLOB_NAME}_openvino_2021.4_{N_SHAVES}shave.blob"
)
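# e.g. ~/projets/Camera-PMR-2/resources/blobs/pedestrian-detection-adas-0002_openvino_2021.4_6shave.blob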
# nn = pipeline.create(dai.node.NeuralNetwork)
detection_nn.setConfidenceThreshold(CONF)
# Outputs + Linking
cam_rgb.preview.link(detection_nn.input)
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
detection_nn.out.link(xout_nn.input)
# Control the camera
controlIn = pipeline.createXLinkIn()
controlIn.setStreamName("control")
controlIn.out.link(cam_rgb.inputControl)
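# Pipeline topology at this point:
#   cam_rgb.preview -> detection_nn.input  (frames fed to the NN on-device)
#   cam_rgb.preview -> xout_rgb            ("rgb" stream to the host)
#   detection_nn.out -> xout_nn            ("nn" stream to the host)
#   controlIn ("control" stream from host) -> cam_rgb.inputControl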
# ==============================================================================
# Pedestrian detection (device is running, sending data via XLink)
# ==============================================================================
print("=== Starting pipeline =================================================")
with depthai.Device(pipeline) as device:
    # Host-side queues: one input queue for camera control, two output queues for frames and detections
controlQueue = device.getInputQueue("control")
ctrl = depthai.CameraControl()
if LENS_FOCUS == -1:
ctrl.setAutoFocusMode(depthai.RawCameraControl.AutoFocusMode.MACRO)
else:
ctrl.setManualFocus(LENS_FOCUS)
controlQueue.send(ctrl)
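    # (setManualFocus takes a lens position in the 0..255 range; the MACRO
    # autofocus mode biases focusing toward near objects.)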
q_rgb = device.getOutputQueue("rgb", maxSize=4, blocking=False)
q_nn = device.getOutputQueue("nn", maxSize=4, blocking=False)
# Init the loop
frame, frame_backup = None, None
time_init = time.time()
    s_history = streamHistory()  # fresh, empty history
while time.time() - time_init < STREAM_DURATION:
detections_list = []
frame = q_rgb.get().getCvFrame()
in_nn = q_nn.get()
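        # q_nn.get() blocks until a packet arrives, so in_nn is never None here
        # (tryGet() would be the non-blocking variant that can return None).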
if in_nn is not None:
detections = in_nn.detections
detections_list = [
[det.xmin, det.ymin, det.xmax, det.ymax, det.confidence]
for det in detections
]
s_history.update(frame, detections_list)
        # Show the frames live (by passing -sho)
if args.show_output:
# Stream history
if detections_list != []:
boxed_frame = copy.deepcopy(frame)
boxTheFrame(boxed_frame, detections_list)
cv2.imshow("Output", boxed_frame)
else:
cv2.imshow("Output", frame)
            # Press "q" at any time to exit the main loop and therefore the program
if cv2.waitKey(1) == ord("q"):
break
# ==============================================================================
# Post-pipeline treatment
# ==============================================================================
# save files
# timestamp = storeData(frame, detections_list, OUTPUT_PATH, IMG_QUALITY)
# ==============================================================================
# This selects an optimal frame (the sharpest one that is not over-exposed)
# ==============================================================================
#s2_history = streamHistory([], [], [], [])
#for i in range(0, len(s_history.blur_scores)):
# if (
# s_history.blur_scores[i] > BLUR_THRESHOLD
# and s_history.detections_numbers[i] > 0
# ):
# s2_history.update(s_history.frames[i], s_history.detections_lists[i])
#if len(s2_history.frames) > 0:
if True:  # the filtered selection above is commented out, so always run the blur-based pick
# most_common_detection_number = Most_Common(s2_history.detections_numbers)
# Keep the clearest frame
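    # (Assuming getBlurScore returns a sharpness measure where higher means
    # sharper; frames at or above LUM_THRESHOLD count as over-exposed.)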
blur_candidate, i_candidate = 0, 0
for i in range(0, len(s_history.blur_scores)):
if (
s_history.blur_scores[i] > blur_candidate
and s_history.lum_scores[i] < LUM_THRESHOLD
):
blur_candidate, i_candidate = s_history.blur_scores[i], i
    index_MCDN = i_candidate  # legacy name from the old most-common-detection-number logic
frame = s_history.frames[index_MCDN]
detections_list = s_history.detections_lists[index_MCDN]
else:
frame = s_history.frames[-1]
detections_list = s_history.detections_lists[-1]
timestamp = storeData(frame, detections_list, s_history, OUTPUT_PATH, IMG_QUALITY)
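# storeData (from image_manip) presumably writes the ...raw.jpg frame and
# ...detections.csv listed in the docstring, returning a timestamp string.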
# log
print(
    "=======================================================================\n"
    f"=== Over: {timestamp} =========================================\n"
    "=======================================================================\n"
    f"Resolution: {RES}\n"
    f"Confidence threshold: {CONF} & blur threshold: {BLUR_THRESHOLD}\n"
    f"Blur score: {getBlurScore(frame)}\n"
    f"Lum score: {getLumScore(frame)}\n"
    f"Frame is clear: {isClear(frame, BLUR_THRESHOLD)}\n"
    f"Number of detections: {len(detections_list)}\n"
    "======================================================================="
)
# Show output and plot stream_history
if args.show_output:
import matplotlib.pyplot as plt
cv2.destroyAllWindows()
    # Per-frame detection counts vs. blur scores normalized by the threshold
    y1 = s_history.detections_numbers
    y2 = [b_s / BLUR_THRESHOLD for b_s in s_history.blur_scores]
x = range(0, len(y1))
plt.scatter(x, y1)
plt.scatter(x, y2)
plt.show()
    if detections_list != []:
        # boxTheFrame draws in place (see its use in the main loop), so draw
        # on a copy and show that instead of relying on a return value.
        boxed_frame = copy.deepcopy(frame)
        boxTheFrame(boxed_frame, detections_list)
        cv2.imshow("Boxed frame kept", boxed_frame)
    else:
        cv2.imshow("Frame kept", frame)
cv2.waitKey(0)
# # Getting back the objects:
# with open('objs.pkl') as f: # Python 3: open(..., 'rb')
# obj0, obj1, obj2 = pickle.load(f)
# ==============================================================================