Hi!

I've mounted my camera in an outdoor setting to identify animals passing through. When I start the camera, I get odd images which slowly pan out to a more normal camera output. Any idea what might be causing this and how I can best solve it?

^ example of image

    TheOracle
    What does your script look like? Seems like it could be an exposure issue or something with ISP.

    Thanks,
    Jaka

      jakaskerl

      My script is pretty simple:

      import cv2
      import depthai as dai
      
      # Create pipeline
      pipeline = dai.Pipeline()
      
      # Define source and output
      camRgb = pipeline.create(dai.node.ColorCamera)
      xoutRgb = pipeline.create(dai.node.XLinkOut)
      
      xoutRgb.setStreamName("rgb")
      
      # Properties
      camRgb.setPreviewSize(640, 640)
      camRgb.setInterleaved(False)
      # OpenCV expects BGR channel order. Requesting RGB here and then showing
      # the frame with cv2.imshow swaps the red and blue channels, so ask the
      # camera for BGR directly.
      camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
      
      # Linking
      camRgb.preview.link(xoutRgb.input)
      
      # Connect to device and start pipeline
      with dai.Device(pipeline) as device:
      
          print('Connected cameras:', device.getConnectedCameraFeatures())
          # Print out usb speed
          print('Usb speed:', device.getUsbSpeed().name)
          # Bootloader version (may be None - presumably when no bootloader is
          # flashed; confirm against the DepthAI docs)
          if device.getBootloaderVersion() is not None:
              print('Bootloader version:', device.getBootloaderVersion())
          # Device name
          print('Device name:', device.getDeviceName(), ' Product name:', device.getProductName())
      
          # Output queue will be used to get the rgb frames from the output defined above
          qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
      
          while True:
              inRgb = qRgb.get()  # blocking call, will wait until a new data has arrived
      
              # Frame is already in BGR (OpenCV's native order) per setColorOrder above
              cv2.imshow("bgr", inRgb.getCvFrame())
      
              if cv2.waitKey(1) == ord('q'):
                  break

        Seems like you are displaying the frame with OpenCV, which expects BGR, while setting the color order to RGB.
        No clue if this would cause the shown behaviour though - would have to run it myself to confirm.

          MernokAdriaan I think I tried it both ways and there was no difference.

          jakaskerl It's an OAK 1 POE

          I'm also running into another issue.

          What's the workflow to run a YOLO model on the OAK camera? I'm trying the blob converter with the model exports from Ultralytics hub but that isn't working. I'll post it as a separate issue, and perhaps you can help me on that as well.

          Thank you for your time!

            TheOracle
            Might be a HW issue.

            TheOracle What's the workflow to run a YOLO model on the OAK camera? I'm trying the blob converter with the model exports from Ultralytics hub but that isn't working. I'll post it as a separate issue, and perhaps you can help me on that as well.

            https://tools.luxonis.com should be used 🙂

            Thanks,
            Jaka

            I managed to make it work using some Docs and mainly ChatGPT(!).

            However, I can't seem to integrate the ObjectTracker node into this program without running into some error; there have just been too many.

            
            from pathlib import Path
            import sys
            import cv2
            import depthai as dai
            import numpy as np
            import time
            
            # Path to the custom YOLOv8n model blob compiled for the OAK device
            nnPath = str("03022025.blob")
            if not Path(nnPath).exists():
                # This is a custom-converted blob, so point the user at the
                # actual missing file instead of install_requirements.py,
                # which cannot fetch it.
                raise FileNotFoundError(f'Model blob not found: {nnPath}')
            
            # YOLOv8 label texts (class index -> class name)
            labelMap = ["goat", "pig", "sheep"]
            
            syncNN = True
            
            # Create pipeline
            pipeline = dai.Pipeline()
            
            # Define sources and outputs
            camRgb = pipeline.create(dai.node.ColorCamera)
            detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
            xoutRgb = pipeline.create(dai.node.XLinkOut)
            nnOut = pipeline.create(dai.node.XLinkOut)
            
            xoutRgb.setStreamName("rgb")
            nnOut.setStreamName("nn")
            
            # Properties
            camRgb.setPreviewSize(640, 640)
            camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
            camRgb.setInterleaved(False)
            camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
            camRgb.setFps(40)
            
            # Network specific settings
            detectionNetwork.setConfidenceThreshold(0.5)
            detectionNetwork.setNumClasses(3)
            detectionNetwork.setCoordinateSize(4)
            detectionNetwork.setIouThreshold(0.5)
            detectionNetwork.setBlobPath(nnPath)
            detectionNetwork.setNumInferenceThreads(2)
            detectionNetwork.input.setBlocking(True)  # From False
            
            # Linking
            camRgb.preview.link(detectionNetwork.input)
            if syncNN:
                detectionNetwork.passthrough.link(xoutRgb.input)
            else:
                camRgb.preview.link(xoutRgb.input)
            
            detectionNetwork.out.link(nnOut.input)
            
            # Connect to device and start pipeline
            with dai.Device(pipeline) as device:
            
                # Output queues will be used to get the rgb frames and nn data from the outputs defined above
                qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
                qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
            
                frame = None
                detections = []
                startTime = time.monotonic()
                counter = 0
                color2 = (255, 255, 255)
            
                def frameNorm(frame, bbox):
                    """Scale normalized <0..1> bbox coords to pixel coords of `frame`."""
                    normVals = np.full(len(bbox), frame.shape[0])
                    normVals[::2] = frame.shape[1]
                    return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
            
                def displayFrame(name, frame):
                    """Draw the current detections onto `frame` and show it."""
                    color = (255, 0, 0)
                    for detection in detections:
                        bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                        # Guard against a class id outside labelMap (e.g. model/labels mismatch)
                        label = labelMap[detection.label] if detection.label < len(labelMap) else str(detection.label)
                        cv2.putText(frame, label, (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
                        # confidence is a 0..1 fraction; scale to a percentage for display
                        cv2.putText(frame, f"{detection.confidence * 100:.1f}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255)
                        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
                    # Show the frame
                    cv2.imshow(name, frame)
            
                while True:
                    if syncNN:
                        inRgb = qRgb.get()
                        inDet = qDet.get()
                    else:
                        inRgb = qRgb.tryGet()
                        inDet = qDet.tryGet()
            
                    if inRgb is not None:
                        frame = inRgb.getCvFrame()
                        cv2.putText(frame, "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)),
                                    (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color2)
            
                    if inDet is not None:
                        detections = inDet.detections
                        counter += 1
            
                    if frame is not None:
                        displayFrame("rgb", frame)
            
                    if cv2.waitKey(1) == ord('q'):
                        break

            This is my program, so far, any assistance into integrating ObjectTracking would be appreciated!

            Thanks in advance.