Demo
Pipeline
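In this pipeline, a color Camera feeds a DetectionNetwork running a YOLOv6-nano model, whose detections drive an ObjectTracker; two mono Cameras (CAM_B and CAM_C) feed a StereoDepth node. On the host, each tracklet's ROI is remapped from the detection frame onto both the RGB passthrough and the colorized disparity frame before drawing.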
Source code
#!/usr/bin/env python3

import cv2
import depthai as dai
import numpy as np

def colorizeDepth(frameDepth):
    invalidMask = frameDepth == 0
    try:
        # Compute a robust min/max from the valid pixels and work in log space
        minDepth = np.percentile(frameDepth[frameDepth != 0], 3)
        maxDepth = np.percentile(frameDepth[frameDepth != 0], 95)
        logDepth = np.zeros_like(frameDepth, dtype=np.float32)
        np.log(frameDepth, where=frameDepth != 0, out=logDepth)
        logMinDepth = np.log(minDepth)
        logMaxDepth = np.log(maxDepth)
        np.nan_to_num(logDepth, copy=False, nan=logMinDepth)
        # Clip the log values to the robust range before mapping to 0-255
        logDepth = np.clip(logDepth, logMinDepth, logMaxDepth)

        # Interpolate the log values into the 0-255 range and apply a colormap
        depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255))
        depthFrameColor = np.nan_to_num(depthFrameColor)
        depthFrameColor = depthFrameColor.astype(np.uint8)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET)
        # Set invalid depth pixels to black
        depthFrameColor[invalidMask] = 0
    except (IndexError, ValueError):
        # Frame has no valid depth pixels (np.percentile raises on an empty array)
        depthFrameColor = np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8)
    return depthFrameColor

# Create pipeline
with dai.Pipeline() as pipeline:
    cameraNode = pipeline.create(dai.node.Camera).build()
    detectionNetwork = pipeline.create(dai.node.DetectionNetwork).build(cameraNode, dai.NNModelDescription("yolov6-nano"))
    objectTracker = pipeline.create(dai.node.ObjectTracker)
    labelMap = detectionNetwork.getClasses()
    monoLeft = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
    monoRight = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)
    stereo = pipeline.create(dai.node.StereoDepth)

    # Linking
    monoLeftOut = monoLeft.requestOutput((1280, 720))
    monoRightOut = monoRight.requestOutput((1280, 720))
    monoLeftOut.link(stereo.left)
    monoRightOut.link(stereo.right)

    detectionNetwork.out.link(objectTracker.inputDetections)
    detectionNetwork.passthrough.link(objectTracker.inputDetectionFrame)
    detectionNetwork.passthrough.link(objectTracker.inputTrackerFrame)

    # Stereo settings: rectify the inputs, enable extended disparity for close
    # range, left-right check to filter occlusions, and subpixel for finer steps
    stereo.setRectification(True)
    stereo.setExtendedDisparity(True)
    stereo.setLeftRightCheck(True)
    stereo.setSubpixel(True)

    qRgb = detectionNetwork.passthrough.createOutputQueue()
    qTrack = objectTracker.out.createOutputQueue()
    qDepth = stereo.disparity.createOutputQueue()

    pipeline.start()

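    # The queues' get() calls block until a message arrives, so each iteration
    # of the loop below waits for all three streams before drawing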
    def displayFrame(name: str, frame: dai.ImgFrame, tracklets: dai.Tracklets):
        color = (0, 255, 0)
        assert tracklets.getTransformation() is not None
        cvFrame = frame.getFrame() if frame.getType() == dai.ImgFrame.Type.RAW16 else frame.getCvFrame()
        if frame.getType() == dai.ImgFrame.Type.RAW16:
            cvFrame = colorizeDepth(cvFrame)
        for tracklet in tracklets.tracklets:
            # Get the shape of the frame from which the detections originated for denormalization
            normShape = tracklets.getTransformation().getSize()

            # Create a rotated rectangle to remap.
            # An intermediate dai.Rect simplifies construction and denormalization.
            rotRect = dai.RotatedRect(tracklet.roi.denormalize(normShape[0], normShape[1]), 0)
            # Remap the detection rectangle to the target frame
            remapped = tracklets.getTransformation().remapRectTo(frame.getTransformation(), rotRect)
            # The remapped rectangle could be rotated, so take its bounding box
            bbox = [int(coord) for coord in remapped.getOuterRect()]
            cv2.putText(
                cvFrame,
                labelMap[tracklet.label],
                (bbox[0] + 10, bbox[1] + 20),
                cv2.FONT_HERSHEY_TRIPLEX,
                0.5,
                255,
            )
            cv2.putText(
                cvFrame,
                f"{int(tracklet.srcImgDetection.confidence * 100)}%",
                (bbox[0] + 10, bbox[1] + 40),
                cv2.FONT_HERSHEY_TRIPLEX,
                0.5,
                255,
            )
            cv2.rectangle(cvFrame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        # Show the frame
        cv2.imshow(name, cvFrame)

    while pipeline.isRunning():
        inRgb: dai.ImgFrame = qRgb.get()
        inTrack: dai.Tracklets = qTrack.get()
        inDepth: dai.ImgFrame = qDepth.get()
        hasRgb = inRgb is not None
        hasDepth = inDepth is not None
        hasTrack = inTrack is not None
        if hasRgb and hasTrack:
            displayFrame("rgb", inRgb, inTrack)
        if hasDepth and hasTrack:
            displayFrame("depth", inDepth, inTrack)
        if cv2.waitKey(1) == ord("q"):
            pipeline.stop()
            break
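Because colorizeDepth only depends on NumPy and OpenCV, it can be tried without a device attached. A minimal sketch, assuming a synthetic 16-bit frame (the gradient range and window name here are illustrative, not part of the example):

import cv2
import numpy as np

# Horizontal gradient from 500 to 5000 (uint16, as in RAW16 depth frames),
# with a zeroed block to exercise the invalid-pixel masking path
depth = np.tile(np.linspace(500, 5000, 640).astype(np.uint16), (400, 1))
depth[:100, :100] = 0

colored = colorizeDepth(depth)  # 400x640x3 uint8, invalid pixels black
cv2.imshow("synthetic depth", colored)
cv2.waitKey(0)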