Neural Depth Align
Supported on: RVC4
Pipeline
Source code
Python
#!/usr/bin/env python3

import numpy as np
import cv2
import depthai as dai
import time
from datetime import timedelta

FPS = 10

RGB_SOCKET = dai.CameraBoardSocket.CAM_A
LEFT_SOCKET = dai.CameraBoardSocket.CAM_B
RIGHT_SOCKET = dai.CameraBoardSocket.CAM_C

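# Rolling FPS counter: computes FPS from the timestamps of the last 10 frames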
class FPSCounter:
    def __init__(self):
        self.frameTimes = []

    def tick(self):
        now = time.time()
        self.frameTimes.append(now)
        self.frameTimes = self.frameTimes[-10:]

    def getFps(self):
        if len(self.frameTimes) <= 1:
            return 0
        return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0])

pipeline = dai.Pipeline()

platform = pipeline.getDefaultDevice().getPlatform()

# Define sources and outputs
camRgb = pipeline.create(dai.node.Camera).build(RGB_SOCKET)
left = pipeline.create(dai.node.Camera).build(LEFT_SOCKET)
right = pipeline.create(dai.node.Camera).build(RIGHT_SOCKET)
stereo = pipeline.create(dai.node.NeuralDepth)
sync = pipeline.create(dai.node.Sync)
align = pipeline.create(dai.node.ImageAlign)

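# Group messages whose timestamps differ by less than half a frame period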
sync.setSyncThreshold(timedelta(seconds=1 / (2 * FPS)))

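# Request an undistorted 1280x960 rgb stream and full-resolution left/right streams at the target FPS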
rgbOut = camRgb.requestOutput(size=(1280, 960), fps=FPS, enableUndistortion=True)
leftOut = left.requestFullResolutionOutput(fps=FPS)
rightOut = right.requestFullResolutionOutput(fps=FPS)
stereo.build(leftOut, rightOut, dai.DeviceModelZoo.NEURAL_DEPTH_LARGE)
# Linking
stereo.depth.link(align.input)
rgbOut.link(align.inputAlignTo)
rgbOut.link(sync.inputs["rgb"])
align.outputAligned.link(sync.inputs["depth_aligned"])

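# The Sync node emits MessageGroups containing one rgb frame and one aligned depth frame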
queue = sync.out.createOutputQueue()

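# Colorize depth on a log scale between the 3rd and 95th depth percentiles; invalid (zero) pixels are drawn black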
def colorizeDepth(frameDepth):
    invalidMask = frameDepth == 0
    # Log the depth, minDepth and maxDepth
    try:
        minDepth = np.percentile(frameDepth[frameDepth != 0], 3)
        maxDepth = np.percentile(frameDepth[frameDepth != 0], 95)
        logDepth = np.log(frameDepth, where=frameDepth != 0)
        logMinDepth = np.log(minDepth)
        logMaxDepth = np.log(maxDepth)
        np.nan_to_num(logDepth, copy=False, nan=logMinDepth)
        # Clip the log values to the percentile range before mapping to 0-255
        logDepth = np.clip(logDepth, logMinDepth, logMaxDepth)

        # Interpolate only valid logDepth values, setting the rest based on the mask
        depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255))
        depthFrameColor = np.nan_to_num(depthFrameColor)
        depthFrameColor = depthFrameColor.astype(np.uint8)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET)
        # Set invalid depth pixels to black
        depthFrameColor[invalidMask] = 0
    except IndexError:
        # Frame is likely empty
        depthFrameColor = np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8)
    except Exception as e:
        raise e
    return depthFrameColor


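# Initial weights for blending the rgb frame with the colorized depth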
rgbWeight = 0.4
depthWeight = 0.6


def updateBlendWeights(percentRgb):
    """
    Update the rgb and depth weights used to blend the depth/rgb image

    @param[in] percentRgb The rgb weight expressed as a percentage (0..100)
    """
    global depthWeight
    global rgbWeight
    rgbWeight = float(percentRgb) / 100.0
    depthWeight = 1.0 - rgbWeight


# Connect to device and start pipeline
with pipeline:
    pipeline.start()

    # Configure windows; trackbar adjusts blending ratio of rgb/depth
    windowName = "rgb-depth"

    # Set the window to be resizable and the initial size
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(windowName, 1280, 720)
    cv2.createTrackbar(
        "RGB Weight %",
        windowName,
        int(rgbWeight * 100),
        100,
        updateBlendWeights,
    )
    fpsCounter = FPSCounter()
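    # Each synchronized MessageGroup carries the rgb frame and the depth frame aligned to it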
    while True:
        messageGroup = queue.get()
        fpsCounter.tick()
        assert isinstance(messageGroup, dai.MessageGroup)
        frameRgb = messageGroup["rgb"]
        assert isinstance(frameRgb, dai.ImgFrame)
        frameDepth = messageGroup["depth_aligned"]
        assert isinstance(frameDepth, dai.ImgFrame)

        # Blend when both received
        if frameDepth is not None:
            cvFrame = frameRgb.getCvFrame()
            # Colorize the aligned depth
            alignedDepthColorized = colorizeDepth(frameDepth.getFrame())
            # Show the colorized depth, which is already aligned to the rgb frame
            cv2.imshow("Depth aligned", alignedDepthColorized)

            # Convert grayscale rgb frames to BGR so they can be blended with the colorized depth
            if len(cvFrame.shape) == 2:
                cvFrame = cv2.cvtColor(cvFrame, cv2.COLOR_GRAY2BGR)
            # print("RGB Size:", cvFrame.shape)
            # print("Depth Size:", alignedDepthColorized.shape)
            blended = cv2.addWeighted(
                cvFrame, rgbWeight, alignedDepthColorized, depthWeight, 0
            )
            cv2.putText(
                blended,
                f"FPS: {fpsCounter.getFps():.2f}",
                (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 255, 255),
                2,
            )
            cv2.imshow(windowName, blended)

        key = cv2.waitKey(1)
        if key == ord("q"):
            break