DepthAI v2 has been superseded by DepthAI v3. You are viewing legacy documentation.
DepthAI Tutorials
DepthAI API References

ON THIS PAGE

  • Demo
  • Pipeline Graph
  • Setup
  • Source code
  • Pipeline

Script change pipeline flow

This example shows how you can change the flow of data inside your pipeline at runtime using the Script node. In this example, we send a message from the host to choose whether we want the Script node to forward the color frame to the MobileNetDetectionNetwork.

Demo

https://user-images.githubusercontent.com/18037362/187734814-df3b46c9-5e04-4a9d-bf6f-d738b40b4421.gif

Pipeline Graph

https://user-images.githubusercontent.com/18037362/187736249-db7ff175-fcea-4d4e-b567-f99087bd82ee.png

Setup

Please run the install script to download all required dependencies. Please note that this script must be run from within the git repository, so you have to clone the depthai-python repository first and then run the script.
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.

Source code

Python

Python
GitHub
#!/usr/bin/env python3
"""Change pipeline data flow at runtime via an on-device Script node.

A host-sent dai.Buffer toggles whether the Script node forwards camera
preview frames to a MobileNet detection network.
Keys: 't' toggles NN inferencing, 'q' quits.
"""
import depthai as dai
import cv2
from pathlib import Path
import numpy as np

# Path to the MobileNet-SSD blob, resolved relative to this script.
parentDir = Path(__file__).parent
nnPath = str((parentDir / Path('../models/mobilenet-ssd_openvino_2021.4_5shave.blob')).resolve().absolute())

pipeline = dai.Pipeline()

# Color camera: 720x720 video output for display, 300x300 preview for the NN.
# Node creation uses pipeline.create(dai.node.X) throughout, instead of mixing
# in the legacy createColorCamera()/createScript() helpers.
cam = pipeline.create(dai.node.ColorCamera)
cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
cam.setInterleaved(False)
cam.setIspScale(2,3)
cam.setVideoSize(720,720)
cam.setPreviewSize(300,300)

# Stream the (always-on) video frames to the host for display.
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName('rgb')
cam.video.link(xoutRgb.input)

# Script node decides, per frame, whether to forward the preview to the NN.
script = pipeline.create(dai.node.Script)

# Host -> device control channel carrying the on/off toggle.
xin = pipeline.create(dai.node.XLinkIn)
xin.setStreamName('in')
xin.out.link(script.inputs['toggle'])

cam.preview.link(script.inputs['rgb'])
script.setScript("""
    toggle = False
    while True:
        msg = node.io['toggle'].tryGet()
        if msg is not None:
            toggle = msg.getData()[0]
            node.warn('Toggle! Perform NN inferencing: ' + str(toggle))

        frame = node.io['rgb'].get()

        if toggle:
            node.io['nn'].send(frame)
""")

# MobileNet-SSD detection network; only receives frames while toggled on.
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
nn.setBlobPath(nnPath)
script.outputs['nn'].link(nn.input)

# Detections back to the host.
xoutNn = pipeline.create(dai.node.XLinkOut)
xoutNn.setStreamName('nn')
nn.out.link(xoutNn.input)

# Connect to device with pipeline
with dai.Device(pipeline) as device:
    inQ = device.getInputQueue("in")
    qRgb = device.getOutputQueue("rgb")
    qNn = device.getOutputQueue("nn")

    runNn = False

    def frameNorm(frame, bbox):
        """Map normalized (0..1) bbox coords to pixel coords of `frame`."""
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]  # x coords scale by width, y by height
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    color = (255, 127, 0)
    def drawDetections(frame, detections):
        """Draw a confidence label and bounding box for each detection."""
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)

    while True:
        frame = qRgb.get().getCvFrame()

        # Detections arrive only while the Script node is forwarding frames.
        if qNn.has():
            detections = qNn.get().detections
            drawDetections(frame, detections)

        cv2.putText(frame, f"NN inferencing: {runNn}", (20,20), cv2.FONT_HERSHEY_TRIPLEX, 0.7, color)
        cv2.imshow('Color frame', frame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('t'):
            runNn = not runNn
            print(f"{'Enabling' if runNn else 'Disabling'} NN inferencing")
            # Send a one-byte payload (0/1) that the on-device script reads
            # via msg.getData()[0].
            buf = dai.Buffer()
            buf.setData(runNn)  # NOTE(review): relies on bool->byte coercion; verify against dai.Buffer.setData overloads
            inQ.send(buf)

C++

1#include <iostream>
2
3// Includes common necessary includes for development using depthai library
4#include "depthai/depthai.hpp"
5
6int main() {
7    dai::Pipeline pipeline;
8
9    auto cam = pipeline.create<dai::node::ColorCamera>();
10    cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
11    cam->setInterleaved(false);
12    cam->setIspScale(2, 3);
13    cam->setVideoSize(720, 720);
14    cam->setPreviewSize(300, 300);
15
16    auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
17    xoutRgb->setStreamName("rgb");
18    cam->video.link(xoutRgb->input);
19
20    auto script = pipeline.create<dai::node::Script>();
21
22    auto xin = pipeline.create<dai::node::XLinkIn>();
23    xin->setStreamName("in");
24    xin->out.link(script->inputs["toggle"]);
25
26    cam->preview.link(script->inputs["rgb"]);
27    script->setScript(R"(
28        toggle = False
29        while True:
30            msg = node.io['toggle'].tryGet()
31            if msg is not None:
32                toggle = msg.getData()[0]
33                node.warn('Toggle! Perform NN inferencing: ' + str(toggle))
34            frame = node.io['rgb'].get()
35            if toggle:
36                node.io['nn'].send(frame)
37    )");
38
39    auto nn = pipeline.create<dai::node::MobileNetDetectionNetwork>();
40    nn->setBlobPath(BLOB_PATH);
41    script->outputs["nn"].link(nn->input);
42
43    auto xoutNn = pipeline.create<dai::node::XLinkOut>();
44    xoutNn->setStreamName("nn");
45    nn->out.link(xoutNn->input);
46
47    // Connect to device with pipeline
48    dai::Device device(pipeline);
49    auto inQ = device.getInputQueue("in");
50    auto qRgb = device.getOutputQueue("rgb");
51    auto qNn = device.getOutputQueue("nn");
52
53    bool runNn = false;
54
55    auto color = cv::Scalar(255, 127, 0);
56
57    auto drawDetections = [color](cv::Mat frame, std::vector<dai::ImgDetection>& detections) {
58        for(auto& detection : detections) {
59            int x1 = detection.xmin * frame.cols;
60            int y1 = detection.ymin * frame.rows;
61            int x2 = detection.xmax * frame.cols;
62            int y2 = detection.ymax * frame.rows;
63
64            std::stringstream confStr;
65            confStr << std::fixed << std::setprecision(2) << detection.confidence * 100;
66            cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
67            cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX);
68        }
69    };
70
71    while(true) {
72        auto frame = qRgb->get<dai::ImgFrame>()->getCvFrame();
73        auto imgDetections = qNn->tryGet<dai::ImgDetections>();
74        if(imgDetections != nullptr) {
75            auto detections = imgDetections->detections;
76            drawDetections(frame, detections);
77        }
78        std::string frameText = "NN inferencing: ";
79        if(runNn) {
80            frameText += "On";
81        } else {
82            frameText += "Off";
83        }
84        cv::putText(frame, frameText, cv::Point(20, 20), cv::FONT_HERSHEY_TRIPLEX, 0.7, color);
85        cv::imshow("Color frame", frame);
86
87        int key = cv::waitKey(1);
88        if(key == 'q') {
89            return 0;
90        } else if(key == 't') {
91            if(runNn) {
92                std::cout << "Disabling\n";
93            } else {
94                std::cout << "Enabling\n";
95            }
96            runNn = !runNn;
97            auto buf = dai::Buffer();
98            std::vector<uint8_t> messageData;
99            messageData.push_back(runNn);
100            buf.setData(messageData);
101            inQ->send(buf);
102        }
103    }
104}

Pipeline

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.