Demo

This example requires the DepthAI v3 API; see the installation instructions.

Source code (Python; a C++ version is available on GitHub):
1#!/usr/bin/env python3
2
3import numpy as np
4import cv2
5import depthai as dai
6import time
7from datetime import timedelta
8FPS = 25.0
9
10RGB_SOCKET = dai.CameraBoardSocket.CAM_A
11LEFT_SOCKET = dai.CameraBoardSocket.CAM_B
12RIGHT_SOCKET = dai.CameraBoardSocket.CAM_C
13
14class FPSCounter:
15 def __init__(self):
16 self.frameTimes = []
17
18 def tick(self):
19 now = time.time()
20 self.frameTimes.append(now)
21 self.frameTimes = self.frameTimes[-10:]
22
23 def getFps(self):
24 if len(self.frameTimes) <= 1:
25 return 0
26 return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0])
27
28pipeline = dai.Pipeline()
29
30platform = pipeline.getDefaultDevice().getPlatform()
31
32# Define sources and outputs
33camRgb = pipeline.create(dai.node.Camera).build(RGB_SOCKET)
34left = pipeline.create(dai.node.Camera).build(LEFT_SOCKET)
35right = pipeline.create(dai.node.Camera).build(RIGHT_SOCKET)
36stereo = pipeline.create(dai.node.StereoDepth)
37sync = pipeline.create(dai.node.Sync)
38if platform == dai.Platform.RVC4:
39 align = pipeline.create(dai.node.ImageAlign)
40
41stereo.setExtendedDisparity(True)
42sync.setSyncThreshold(timedelta(seconds=1/(2*FPS)))
43
44rgbOut = camRgb.requestOutput(size = (1280, 960), fps = FPS, enableUndistortion=True)
45leftOut = left.requestOutput(size = (640, 400), fps = FPS)
46rightOut = right.requestOutput(size = (640, 400), fps = FPS)
47
48# Linking
49rgbOut.link(sync.inputs["rgb"])
50leftOut.link(stereo.left)
51rightOut.link(stereo.right)
52if platform == dai.Platform.RVC4:
53 stereo.depth.link(align.input)
54 rgbOut.link(align.inputAlignTo)
55 align.outputAligned.link(sync.inputs["depth_aligned"])
56else:
57 stereo.depth.link(sync.inputs["depth_aligned"])
58 rgbOut.link(stereo.inputAlignTo)
59
60queue = sync.out.createOutputQueue()
61
62def colorizeDepth(frameDepth):
63 invalidMask = frameDepth == 0
64 # Log the depth, minDepth and maxDepth
65 try:
66 minDepth = np.percentile(frameDepth[frameDepth != 0], 3)
67 maxDepth = np.percentile(frameDepth[frameDepth != 0], 95)
68 logDepth = np.zeros_like(frameDepth, dtype=np.float32)
69 np.log(frameDepth, where=frameDepth != 0, out=logDepth)
70 logMinDepth = np.log(minDepth)
71 logMaxDepth = np.log(maxDepth)
72 np.nan_to_num(logDepth, copy=False, nan=logMinDepth)
73 # Clip the values to be in the 0-255 range
74 logDepth = np.clip(logDepth, logMinDepth, logMaxDepth)
75
76 # Interpolate only valid logDepth values, setting the rest based on the mask
77 depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255))
78 depthFrameColor = np.nan_to_num(depthFrameColor)
79 depthFrameColor = depthFrameColor.astype(np.uint8)
80 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET)
81 # Set invalid depth pixels to black
82 depthFrameColor[invalidMask] = 0
83 except IndexError:
84 # Frame is likely empty
85 depthFrameColor = np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8)
86 except Exception as e:
87 raise e
88 return depthFrameColor
89
90
91rgbWeight = 0.4
92depthWeight = 0.6
93
94
95def updateBlendWeights(percentRgb):
96 """
97 Update the rgb and depth weights used to blend depth/rgb image
98
99 @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
100 """
101 global depthWeight
102 global rgbWeight
103 rgbWeight = float(percentRgb) / 100.0
104 depthWeight = 1.0 - rgbWeight
105
106
107# Connect to device and start pipeline
108with pipeline:
109 pipeline.start()
110
111 # Configure windows; trackbar adjusts blending ratio of rgb/depth
112 windowName = "rgb-depth"
113
114 # Set the window to be resizable and the initial size
115 cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
116 cv2.resizeWindow(windowName, 1280, 720)
117 cv2.createTrackbar(
118 "RGB Weight %",
119 windowName,
120 int(rgbWeight * 100),
121 100,
122 updateBlendWeights,
123 )
124 fpsCounter = FPSCounter()
125 while True:
126 messageGroup = queue.get()
127 fpsCounter.tick()
128 assert isinstance(messageGroup, dai.MessageGroup)
129 frameRgb = messageGroup["rgb"]
130 assert isinstance(frameRgb, dai.ImgFrame)
131 frameDepth = messageGroup["depth_aligned"]
132 assert isinstance(frameDepth, dai.ImgFrame)
133
134 # Blend when both received
135 if frameDepth is not None:
136 cvFrame = frameRgb.getCvFrame()
137 # Colorize the aligned depth
138 alignedDepthColorized = colorizeDepth(frameDepth.getFrame())
139 # Resize depth to match the rgb frame
140 cv2.imshow("Depth aligned", alignedDepthColorized)
141
142 if len(cvFrame.shape) == 2:
143 cvFrameUndistorted = cv2.cvtColor(cvFrame, cv2.COLOR_GRAY2BGR)
144 blended = cv2.addWeighted(
145 cvFrame, rgbWeight, alignedDepthColorized, depthWeight, 0
146 )
147 cv2.putText(
148 blended,
149 f"FPS: {fpsCounter.getFps():.2f}",
150 (10, 30),
151 cv2.FONT_HERSHEY_SIMPLEX,
152 1,
153 (255, 255, 255),
154 2,
155 )
156 cv2.imshow(windowName, blended)
157
158 key = cv2.waitKey(1)
159 if key == ord("q"):
160 breakNeed assistance?
Head over to the Discussion Forum for technical support or any other questions you might have.