Auto Exposure on ROI

This example shows how to set the Auto Exposure (AE) region of the RGB camera dynamically, during application runtime, based on bounding box position. By default, the AE region follows the neural network output. If desired, the region can be controlled manually with the following keys (a minimal sketch of the underlying control message follows the list):
  • w - move AE region up
  • s - move AE region down
  • a - move AE region left
  • d - move AE region right
  • +/- - grow/shrink AE region
  • n - deactivate manual region (switch back to NN-based ROI)
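
Under the hood, each key press (and, in automatic mode, each neural network detection) is translated into a dai.CameraControl message carrying an AE region, which is sent to the camera through an XLinkIn queue. A minimal sketch of that mechanism; the queue name matches the full example below, and the region values are illustrative:
Python
# Minimal sketch: steer auto-exposure toward a region of the sensor.
# setAutoExposureRegion takes (startX, startY, width, height) in sensor
# pixels; (420, 0, 360, 360) is just an illustrative region. Assumes
# `device` is an active dai.Device running the pipeline from this example,
# where the 'camControl' XLinkIn stream is linked to camRgb.inputControl.
import depthai as dai

ctrl = dai.CameraControl()
ctrl.setAutoExposureRegion(420, 0, 360, 360)
device.getInputQueue('camControl').send(ctrl)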

Demo

Setup

Please run the install script to download all required dependencies. Note that this script must be run from within the cloned repository, so first clone the depthai-python repository and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.

Source code

Python
#!/usr/bin/env python3

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

# Press WASD to move a manual ROI window for auto-exposure control.
# Press N to go back to the region controlled by the NN detections.

# Get the model path from the command line, defaulting to the bundled blob
nnPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

previewSize = (300, 300)

# Create pipeline
pipeline = dai.Pipeline()

# Define source and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(*previewSize)
camRgb.setInterleaved(False)

# Control input, used to send AE region updates to the camera at runtime
camControlIn = pipeline.create(dai.node.XLinkIn)
camControlIn.setStreamName('camControl')
camControlIn.out.link(camRgb.inputControl)

# Define a neural network that will make predictions based on the source frames
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)
camRgb.preview.link(nn.input)

# Linking
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName("rgb")
camRgb.preview.link(xoutRgb.input)

nnOut = pipeline.create(dai.node.XLinkOut)
nnOut.setStreamName("nn")
nn.out.link(nnOut.input)

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

def clamp(num, v0, v1):
    return max(v0, min(num, v1))

def asControl(roi):
    camControl = dai.CameraControl()
    camControl.setAutoExposureRegion(*roi)
    return camControl

class AutoExposureRegion:
    step = 10
    position = (0, 0)
    size = (100, 100)
    resolution = camRgb.getResolutionSize()
    maxDims = previewSize[0], previewSize[1]

    def grow(self, x=0, y=0):
        self.size = (
            clamp(x + self.size[0], 1, self.maxDims[0]),
            clamp(y + self.size[1], 1, self.maxDims[1])
        )

    def move(self, x=0, y=0):
        self.position = (
            clamp(x + self.position[0], 0, self.maxDims[0]),
            clamp(y + self.position[1], 0, self.maxDims[1])
        )

    def endPosition(self):
        return (
            clamp(self.position[0] + self.size[0], 0, self.maxDims[0]),
            clamp(self.position[1] + self.size[1], 0, self.maxDims[1]),
        )

    def toRoi(self):
        roi = np.array([*self.position, *self.size])
        # Convert from preview coordinates to absolute sensor coordinates:
        # the preview is a centered square crop of the sensor, scaled down,
        # so scale by sensorHeight / previewHeight, then shift x by the crop offset
        roi = roi * self.resolution[1] // previewSize[1]
        roi[0] += (self.resolution[0] - self.resolution[1]) // 2  # x offset for device crop
        return roi

    @staticmethod
    def bboxToRoi(bbox):
        startX, startY = bbox[:2]
        width, height = bbox[2] - startX, bbox[3] - startY
        # The normalized bbox lives in the square preview, which is a centered
        # crop of the sensor: scale both axes by the sensor height, then shift
        # x by the same crop offset used in toRoi above
        sensorW, sensorH = camRgb.getResolutionSize()
        roi = frameNorm(np.empty((sensorH, sensorH)), (startX, startY, width, height))
        roi[0] += (sensorW - sensorH) // 2
        return roi

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Queues used to send camera controls and to get the rgb frames and nn data from the outputs defined above
    qControl = device.getInputQueue(name="camControl")
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    frame = None
    detections = []

    nnRegion = True
    region = AutoExposureRegion()

    # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def displayFrame(name, frame):
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
        if not nnRegion:
            cv2.rectangle(frame, region.position, region.endPosition(), (0, 255, 0), 2)
        cv2.imshow(name, frame)

    while True:
        # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
        inRgb = qRgb.tryGet()
        inDet = qDet.tryGet()

        if inRgb is not None:
            frame = inRgb.getCvFrame()

        if inDet is not None:
            detections = inDet.detections

            if nnRegion and len(detections) > 0:
                bbox = (detections[0].xmin, detections[0].ymin, detections[0].xmax, detections[0].ymax)
                qControl.send(asControl(AutoExposureRegion.bboxToRoi(bbox)))

        if frame is not None:
            displayFrame("rgb", frame)

        key = cv2.waitKey(1)
        if key == ord('n'):
            print("AE ROI controlled by NN")
            nnRegion = True
        elif key in [ord('w'), ord('a'), ord('s'), ord('d'), ord('+'), ord('-')]:
            nnRegion = False
            if key == ord('a'):
                region.move(x=-region.step)
            if key == ord('d'):
                region.move(x=region.step)
            if key == ord('w'):
                region.move(y=-region.step)
            if key == ord('s'):
                region.move(y=region.step)
            if key == ord('+'):
                region.grow(x=10, y=10)
                region.step = region.step + 1
            if key == ord('-'):
                region.grow(x=-10, y=-10)
                region.step = max(region.step - 1, 1)
            print(f"Setting static AE ROI: {region.toRoi()} (on frame: {[*region.position, *region.endPosition()]})")
            qControl.send(asControl(region.toRoi()))
        elif key == ord('q'):
            break
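
To make the coordinate conversion concrete: with the default 1080p sensor resolution (getResolutionSize() returns 1920×1080) and the 300×300 preview, toRoi() multiplies each preview coordinate by 1080 and integer-divides by 300 (a 3.6× scale), then shifts x by (1920 - 1080) / 2 = 420 pixels, because the preview is produced from a centered square crop of the sensor. The initial manual region, position (0, 0) with size (100, 100), therefore maps to the sensor-space ROI (420, 0, 360, 360).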

Pipeline
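
The pipeline consists of a ColorCamera node whose 300×300 preview output feeds both a MobileNetDetectionNetwork and an XLinkOut stream named rgb. The network's detections leave the device through a second XLinkOut stream named nn, while an XLinkIn stream named camControl delivers CameraControl messages to the camera's inputControl, updating the AE region at runtime.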
