Custom Visualizations
The example creates a DepthAI pipeline in Python to perform YOLOv6-Nano object detection, generates custom image annotations with bounding boxes and text, and streams the results along with 640x480 NV12 camera frames over to a remote connection for visualization.

Setup
This example requires the DepthAI v3 API; see the installation instructions.

Pipeline
Source code
Python
C++
Python
Python (GitHub)
1#!/usr/bin/env python3
2
3import depthai as dai
4
class ImgDetectionsExtended(dai.ImgDetections):
    """dai.ImgDetections subclass that supplies a custom visualization message.

    The DepthAI visualizer calls ``getVisualizationMessage()`` on messages it
    receives; returning a ``dai.ImgAnnotations`` here makes the remote
    visualizer draw our custom boxes and labels instead of the defaults.
    """

    def __init__(self, detections: dai.ImgDetections):
        # Use super() rather than an explicit base-class call; same effect,
        # idiomatic and robust to base renames.
        super().__init__()
        # Carry over the detection list from the wrapped message.
        self.detections = detections.detections

    # The function can return dai.ImgAnnotations or dai.ImgFrame
    def getVisualizationMessage(self) -> dai.ImgAnnotations:
        """Build one outlined, labeled bounding box per detection."""
        imgAnnt = dai.ImgAnnotations()
        # Setting the timestamp is important, as the visualizer uses it to synchronize the data
        imgAnnt.setTimestamp(self.getTimestamp())
        annotation = dai.ImgAnnotation()
        for detection in self.detections:
            # Rectangle outline through the four bounding-box corners.
            # NOTE(review): LINE_STRIP draws only the three segments between
            # these four points — the left edge is not explicitly closed;
            # confirm this renders as intended (the fill still covers the box).
            pointsAnnotation = dai.PointsAnnotation()
            pointsAnnotation.type = dai.PointsAnnotationType.LINE_STRIP
            pointsAnnotation.points = dai.VectorPoint2f([
                dai.Point2f(detection.xmin, detection.ymin),
                dai.Point2f(detection.xmax, detection.ymin),
                dai.Point2f(detection.xmax, detection.ymax),
                dai.Point2f(detection.xmin, detection.ymax),
            ])
            # Colors are RGBA in [0, 1].
            pointsAnnotation.outlineColor = dai.Color(1.0, 0.5, 0.5, 1.0)
            pointsAnnotation.fillColor = dai.Color(0.5, 1.0, 0.5, 0.5)
            pointsAnnotation.thickness = 2.0
            # Text label anchored at the top-left corner of the box.
            text = dai.TextAnnotation()
            text.position = dai.Point2f(detection.xmin, detection.ymin)
            # Plain literal: the original used an f-string with no placeholders.
            text.text = "Test annotation"
            text.fontSize = 50.5
            text.textColor = dai.Color(0.5, 0.5, 1.0, 1.0)
            text.backgroundColor = dai.Color(1.0, 1.0, 0.5, 1.0)
            annotation.points.append(pointsAnnotation)
            annotation.texts.append(text)

        imgAnnt.annotations.append(annotation)
        return imgAnnt
44
class ImgAnnotationsGenerator(dai.node.ThreadedHostNode):
    """Host-side node that re-wraps detection messages as ImgDetectionsExtended.

    Receives ``dai.ImgDetections`` on ``inputDet`` and forwards each one on
    ``output`` wrapped in the subclass, so the visualizer can request the
    custom annotation message from it.
    """

    def __init__(self):
        super().__init__()
        self.inputDet = self.createInput()
        self.output = self.createOutput()

    def run(self):
        while self.isRunning():
            detectionsMsg = self.inputDet.get()
            wrapped = ImgDetectionsExtended(detectionsMsg)
            # Setting the timestamp is important, as the visualizer uses it to synchronize the data
            wrapped.setTimestamp(detectionsMsg.getTimestamp())
            self.output.send(wrapped)
58
# Remote connection the visualizer attaches to; created before the pipeline
# so topics can be registered while the pipeline is being assembled.
remoteConnector = dai.RemoteConnection()

# Create pipeline
with dai.Pipeline() as pipeline:
    # Color camera on socket CAM_A feeding a YOLOv6-Nano detection network.
    cameraNode = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
    detectionNetwork = pipeline.create(dai.node.DetectionNetwork).build(
        cameraNode, dai.NNModelDescription("yolov6-nano")
    )
    # Host node (defined above) that wraps detections for custom visualization.
    imageAnnotationsGenerator = pipeline.create(ImgAnnotationsGenerator)
    # 640x480 NV12 preview stream sent to the visualizer.
    outputToVisualize = cameraNode.requestOutput((640,480), type=dai.ImgFrame.Type.NV12)
    detectionNetwork.out.link(imageAnnotationsGenerator.inputDet)

    # Add the remote connector topics
    remoteConnector.addTopic("encoded", outputToVisualize, "images")
    remoteConnector.addTopic("detections", detectionNetwork.out, "images")
    remoteConnector.addTopic("annotations", imageAnnotationsGenerator.output, "images")

    pipeline.start()

    # Register the pipeline with the remote connector
    remoteConnector.registerPipeline(pipeline)

    # Poll the visualizer for key presses; 'q' stops the pipeline.
    while pipeline.isRunning():
        if remoteConnector.waitKey(1) == ord("q"):
            pipeline.stop()
            break
Need assistance?
Head over to Discussion Forum for technical support or any other questions you might have.