DepthAI
Software Stack

ON THIS PAGE

  • Pipeline
  • Source code

Visualizer Encoded

Supported on: RVC2, RVC4
The example utilizes DepthAI to create a pipeline that performs YOLOv6-Nano object detection, generates custom image annotations with bounding boxes and labels, encodes high-resolution (1920x1440) camera frames as MJPEG, and streams the results to a remote connection for visualization. This example requires the DepthAI v3 API; see the installation instructions.

Pipeline

Source code

Python

Python
GitHub
#!/usr/bin/env python3

import depthai as dai
from argparse import ArgumentParser

# Ports used by the remote visualizer connection.
parser = ArgumentParser()
parser.add_argument("--webSocketPort", type=int, default=8765)
parser.add_argument("--httpPort", type=int, default=8082)
args = parser.parse_args()

# Connection that streams pipeline outputs to a remote viewer.
remoteConnector = dai.RemoteConnection(
    webSocketPort=args.webSocketPort, httpPort=args.httpPort
)

# Encoder profile for the high-resolution camera stream.
ENCODER_PROFILE = dai.VideoEncoderProperties.Profile.MJPEG
class ImgAnnotationsGenerator(dai.node.ThreadedHostNode):
    """Host node that converts ImgDetections into ImgAnnotations
    (bounding-box outlines plus confidence labels) for the visualizer."""

    def __init__(self):
        super().__init__()
        self.inputDet = self.createInput()
        self.output = self.createOutput()
        # Class names indexed by detection label. Initialized here so run()
        # cannot hit an AttributeError if a message arrives before
        # setLabelMap() is called.
        self.labelMap = None

    def setLabelMap(self, labelMap):
        """Store the list of class names used to render label text."""
        self.labelMap = labelMap

    def _labelText(self, label):
        """Human-readable class name for ``label``; falls back to the raw
        numeric index when no label map is set or the index is out of range
        (e.g. when getClasses() returned None)."""
        if self.labelMap is not None and 0 <= label < len(self.labelMap):
            return self.labelMap[label]
        return str(label)

    def run(self):
        while self.mainLoop():
            nnData = self.inputDet.get()
            detections = nnData.detections
            imgAnnt = dai.ImgAnnotations()
            # Propagate timestamp/transformation so annotations align with frames.
            imgAnnt.setTimestamp(nnData.getTimestamp())
            imgAnnt.setTransformation(nnData.getTransformation())
            annotation = dai.ImgAnnotation()
            for detection in detections:
                # Bounding box drawn as a closed line strip over the four corners.
                pointsAnnotation = dai.PointsAnnotation()
                pointsAnnotation.type = dai.PointsAnnotationType.LINE_STRIP
                pointsAnnotation.points = dai.VectorPoint2f([
                    dai.Point2f(detection.xmin, detection.ymin),
                    dai.Point2f(detection.xmax, detection.ymin),
                    dai.Point2f(detection.xmax, detection.ymax),
                    dai.Point2f(detection.xmin, detection.ymax),
                ])
                pointsAnnotation.outlineColor = dai.Color(1.0, 0.5, 0.5, 1.0)
                pointsAnnotation.fillColor = dai.Color(0.5, 1.0, 0.5, 0.5)
                pointsAnnotation.thickness = 2.0
                # Label text anchored at the top-left corner of the box.
                text = dai.TextAnnotation()
                text.position = dai.Point2f(detection.xmin, detection.ymin)
                text.text = f"{self._labelText(detection.label)} {int(detection.confidence * 100)}%"
                text.fontSize = 50.5
                text.textColor = dai.Color(0.5, 0.5, 1.0, 1.0)
                text.backgroundColor = dai.Color(1.0, 1.0, 0.5, 1.0)
                annotation.points.append(pointsAnnotation)
                annotation.texts.append(text)

            imgAnnt.annotations.append(annotation)
            self.output.send(imgAnnt)
57
# Build and run the pipeline.
with dai.Pipeline() as pipeline:
    cameraNode = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
    detectionNetwork = pipeline.create(dai.node.DetectionNetwork).build(
        cameraNode, dai.NNModelDescription("yolov6-nano")
    )
    imageAnnotationsGenerator = pipeline.create(ImgAnnotationsGenerator)

    # High-resolution NV12 stream, MJPEG-encoded at 30 FPS.
    encoder = pipeline.create(dai.node.VideoEncoder)
    encoder.setDefaultProfilePreset(30, ENCODER_PROFILE)
    outputToEncode = cameraNode.requestOutput((1920, 1440), type=dai.ImgFrame.Type.NV12)
    outputToEncode.link(encoder.input)

    # Feed detections into the annotation generator and give it class names.
    detectionNetwork.out.link(imageAnnotationsGenerator.inputDet)
    imageAnnotationsGenerator.setLabelMap(detectionNetwork.getClasses())

    # Expose the streams to the remote visualizer.
    remoteConnector.addTopic("encoded", encoder.out, "images")
    remoteConnector.addTopic("detections", detectionNetwork.out, "images")
    remoteConnector.addTopic("annotations", imageAnnotationsGenerator.output, "images")

    pipeline.start()
    remoteConnector.registerPipeline(pipeline)

    # Run until the pipeline stops or 'q' is pressed in the visualizer.
    while pipeline.isRunning():
        if remoteConnector.waitKey(1) == ord("q"):
            pipeline.stop()
            break

C++

1#include <atomic>
2#include <csignal>
3#include <iostream>
4#include <memory>
5#include <string>
6#include <vector>
7
8#include "depthai/depthai.hpp"
9#include "depthai/pipeline/datatype/ImgAnnotations.hpp"
10#include "depthai/remote_connection/RemoteConnection.hpp"
11
// Set when SIGINT/SIGTERM arrives; polled by the main loop for graceful shutdown.
std::atomic<bool> quitEvent{false};

// Signal handler: only raises the shutdown flag (async-signal-safe).
void signalHandler(int /*signum*/) {
    quitEvent = true;
}
19
20// Custom host node for image annotations
21class ImgAnnotationsGenerator : public dai::NodeCRTP<dai::node::HostNode, ImgAnnotationsGenerator> {
22   public:
23    Input& inputDet = inputs["detections"];
24    Output& output = out;
25
26    std::vector<std::string> labelMap;
27
28    std::shared_ptr<ImgAnnotationsGenerator> build(Output& detections) {
29        detections.link(inputDet);
30        return std::static_pointer_cast<ImgAnnotationsGenerator>(this->shared_from_this());
31    }
32
33    std::shared_ptr<dai::Buffer> processGroup(std::shared_ptr<dai::MessageGroup> in) override {
34        auto nnData = in->get<dai::ImgDetections>("detections");
35        auto detections = nnData->detections;
36
37        auto imgAnnt = std::make_shared<dai::ImgAnnotations>();
38        imgAnnt->setTimestamp(nnData->getTimestamp());
39        imgAnnt->transformation = nnData->transformation;
40
41        dai::ImgAnnotation annotation;
42        for(const auto& detection : detections) {
43            // Create points annotation for bounding box
44            auto pointsAnnotation = std::make_shared<dai::PointsAnnotation>();
45            pointsAnnotation->type = dai::PointsAnnotationType::LINE_STRIP;
46            pointsAnnotation->points = {dai::Point2f(detection.xmin, detection.ymin),
47                                        dai::Point2f(detection.xmax, detection.ymin),
48                                        dai::Point2f(detection.xmax, detection.ymax),
49                                        dai::Point2f(detection.xmin, detection.ymax)};
50
51            // Set colors and thickness
52            pointsAnnotation->outlineColor = dai::Color(1.0f, 0.5f, 0.5f, 1.0f);
53            pointsAnnotation->fillColor = dai::Color(0.5f, 1.0f, 0.5f, 0.5f);
54            pointsAnnotation->thickness = 2.0f;
55
56            // Create text annotation
57            auto text = std::make_shared<dai::TextAnnotation>();
58            text->position = dai::Point2f(detection.xmin, detection.ymin);
59
60            // Get label text
61            std::string labelText;
62            try {
63                labelText = labelMap[detection.label];
64            } catch(...) {
65                labelText = std::to_string(detection.label);
66            }
67
68            text->text = labelText + " " + std::to_string(static_cast<int>(detection.confidence * 100)) + "%";
69            text->fontSize = 50.5f;
70            text->textColor = dai::Color(0.5f, 0.5f, 1.0f, 1.0f);
71            text->backgroundColor = dai::Color(1.0f, 1.0f, 0.5f, 1.0f);
72
73            annotation.points.push_back(*pointsAnnotation);
74            annotation.texts.push_back(*text);
75        }
76
77        imgAnnt->annotations.push_back(annotation);
78        return imgAnnt;
79    }
80};
81
82int main(int argc, char** argv) {
83    // Set up signal handlers
84    signal(SIGTERM, signalHandler);
85    signal(SIGINT, signalHandler);
86
87    try {
88        // Create remote connection
89        dai::RemoteConnection remoteConnector;
90
91        // Create pipeline
92        dai::Pipeline pipeline;
93
94        // Create nodes
95        auto cameraNode = pipeline.create<dai::node::Camera>();
96        cameraNode->build(dai::CameraBoardSocket::CAM_A);
97
98        auto detectionNetwork = pipeline.create<dai::node::DetectionNetwork>();
99        dai::NNModelDescription modelDesc;
100        modelDesc.model = "yolov6-nano";
101        detectionNetwork->build(cameraNode, modelDesc);
102
103        auto imageAnnotationsGenerator = pipeline.create<ImgAnnotationsGenerator>();
104        auto outputToEncode = cameraNode->requestOutput(std::make_pair(1920, 1440), dai::ImgFrame::Type::NV12);
105
106        auto encoder = pipeline.create<dai::node::VideoEncoder>();
107        encoder->setDefaultProfilePreset(30, dai::VideoEncoderProperties::Profile::MJPEG);
108
109        // Linking
110        outputToEncode->link(encoder->input);
111        detectionNetwork->out.link(imageAnnotationsGenerator->inputDet);
112
113        // Set label map
114        imageAnnotationsGenerator->labelMap = detectionNetwork->getClasses().value();
115
116        // Add remote connector topics
117        remoteConnector.addTopic("encoded", encoder->out, "images");
118        remoteConnector.addTopic("detections", detectionNetwork->out, "images");
119        remoteConnector.addTopic("annotations", imageAnnotationsGenerator->output, "images");
120
121        // Start pipeline
122        pipeline.start();
123
124        // Register pipeline with remote connector
125        remoteConnector.registerPipeline(pipeline);
126
127        // Main loop
128        while(pipeline.isRunning() && !quitEvent) {
129            if(remoteConnector.waitKey(1) == 'q') {
130                pipeline.stop();
131                break;
132            }
133        }
134
135        // Cleanup
136        pipeline.stop();
137        pipeline.wait();
138
139    } catch(const std::exception& e) {
140        std::cerr << "Error: " << e.what() << std::endl;
141        return 1;
142    }
143
144    return 0;
145}

Need assistance?

Head over to the Discussion Forum for technical support or any other questions you might have.