Spatial detections on rotated OAK
This example is very similar to RGB & MobilenetSSD with spatial data - it only assumes the OAK is rotated by 180° (upside down).

ColorCamera frames are rotated on the sensor itself by setting camRgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG). This means all outputs of the node (still/isp/video/preview) will already be rotated.

We rotate depth frames after the StereoDepth node creates them. One might try rotating the mono frames before sending them to the StereoDepth node, but this wouldn't work, as the stereo calibration would need to reflect such a change. So we use an ImageManip node to rotate the depth frames (code below) and then send them to the MobileNetSpatialDetectionNetwork.
Python
manip = pipeline.create(dai.node.ImageManip)
# Vertical + horizontal flip == rotating the frame by 180°
manip.initialConfig.setVerticalFlip(True)
manip.initialConfig.setHorizontalFlip(True)
manip.setFrameType(dai.ImgFrame.Type.RAW16)
stereo.depth.link(manip.inputImage)
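The two flips really do compose to a 180° rotation; a minimal NumPy sanity check (illustration only, not part of the pipeline):
Python
import numpy as np

a = np.arange(12).reshape(3, 4)
# flipping vertically and then horizontally equals rotating by 180°
assert np.array_equal(np.flipud(np.fliplr(a)), np.rot90(a, 2))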
Similar samples:
- RGB & MobilenetSSD with spatial data
- Spatial object tracker on RGB
- Mono & MobilenetSSD with spatial data
- RGB & TinyYolo with spatial data
Setup
Please run the install script to download all required dependencies. Please note that this script must be run from the git context, so you have to clone the depthai-python repository first and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
Source code
Python
#!/usr/bin/env python3

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

'''
Spatial object detections demo for 180° rotated OAK camera.
'''

# Get argument first
nnBlobPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnBlobPath = sys.argv[1]

if not Path(nnBlobPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

syncNN = True

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)

xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutNN = pipeline.create(dai.node.XLinkOut)
xoutDepth = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
xoutDepth.setStreamName("depth")

# Properties
camRgb.setPreviewSize(300, 300)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
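# Rotate color frames on the sensor itself; every ColorCamera output
# (still/isp/video/preview) will then already be rotated by 180°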
camRgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)

monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")

# Setting node configs
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Align depth map to the perspective of RGB camera, on which inference is done
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
stereo.setSubpixel(True)
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())

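# Rotate depth frames on-device after StereoDepth produces them; flipping the mono
# inputs instead would require the stereo calibration to reflect that change.
# Depth values are 16-bit, hence the RAW16 frame type.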
rotate_stereo_manip = pipeline.create(dai.node.ImageManip)
rotate_stereo_manip.initialConfig.setVerticalFlip(True)
rotate_stereo_manip.initialConfig.setHorizontalFlip(True)
rotate_stereo_manip.setFrameType(dai.ImgFrame.Type.RAW16)
stereo.depth.link(rotate_stereo_manip.inputImage)

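# Spatial config: shrink each detection's ROI to 50% before sampling depth inside
# it, and ignore depth values outside the 100-5000 mm range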
spatialDetectionNetwork.setBlobPath(nnBlobPath)
spatialDetectionNetwork.setConfidenceThreshold(0.5)
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(5000)

# Linking
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)

camRgb.preview.link(spatialDetectionNetwork.input)
if syncNN:
    spatialDetectionNetwork.passthrough.link(xoutRgb.input)
else:
    camRgb.preview.link(xoutRgb.input)

spatialDetectionNetwork.out.link(xoutNN.input)

rotate_stereo_manip.out.link(spatialDetectionNetwork.inputDepth)
spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input)

color = (255, 0, 0)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
    depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)

    while True:
        inPreview = previewQueue.get()
        inDet = detectionNNQueue.get()
        depth = depthQueue.get()

        frame = inPreview.getCvFrame()
        depthFrame = depth.getFrame()  # depthFrame values are in millimeters

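        # Scale depth to 8 bits for visualization, using the 1st/99th percentiles
        # of a subsampled frame as the display range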
        depth_downscaled = depthFrame[::4]
        if np.all(depth_downscaled == 0):
            min_depth = 0  # Set a default minimum depth value when all elements are zero
        else:
            min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
        max_depth = np.percentile(depth_downscaled, 99)
        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

        detections = inDet.detections

        # If the frame is available, draw bounding boxes on it and show the frame
        height = frame.shape[0]
        width = frame.shape[1]
        for detection in detections:
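            # boundingBoxMapping holds the ROI (in depth-frame coordinates) that
            # was used to compute this detection's spatial coordinates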
            roiData = detection.boundingBoxMapping
            roi = roiData.roi
            roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])
            topLeft = roi.topLeft()
            bottomRight = roi.bottomRight()
            xmin = int(topLeft.x)
            ymin = int(topLeft.y)
            xmax = int(bottomRight.x)
            ymax = int(bottomRight.y)
            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 2)

            # Denormalize bounding box
            x1 = int(detection.xmin * width)
            x2 = int(detection.xmax * width)
            y1 = int(detection.ymin * height)
            y2 = int(detection.ymax * height)
            try:
                label = labelMap[detection.label]
            except IndexError:
                label = detection.label
            cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)

            cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)

        cv2.imshow("depth", depthFrameColor)
        cv2.imshow("preview", frame)

        if cv2.waitKey(1) == ord('q'):
            break
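Running the example opens two windows: preview shows the rotated color frames with detection labels, confidence and X/Y/Z coordinates in millimeters, while depth shows the rotated, colorized depth map together with the ROIs used for the spatial calculations. Press q to quit.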
Pipeline
Need assistance?
Head over to Discussion Forum for technical support or any other questions you might have.