DepthAI v2 has been superseded by DepthAI v3. You are viewing legacy documentation.
DepthAI Tutorials
DepthAI API References

ON THIS PAGE

  • Demo
  • Setup
  • Source code
  • Pipeline

Multi-Input Frame Concatenation

Example concatenates all 3 inputs with a simple custom model created with PyTorch (link here, tutorial here). It uses NeuralNetwork's multiple input feature and links all 3 camera streams directly to the NeuralNetwork node.

Demo

https://user-images.githubusercontent.com/18037362/134209980-09c6e2f9-8a26-45d5-a6ad-c31d9e2816e1.png

Setup

Please run the install script to download all required dependencies. Please note that this script must be run from a git context, so you have to download the depthai-python repository first and then run the script.
Command Line
1git clone https://github.com/luxonis/depthai-python.git
2cd depthai-python/examples
3python3 install_requirements.py
For additional information, please follow the installation guide.

Source code

Python

Python
GitHub
#!/usr/bin/env python3

from pathlib import Path
import sys
import numpy as np
import cv2
import depthai as dai

# Width/height of every NN input frame; the concat model expects 300x300 inputs.
SHAPE = 300

# Get argument first
nnPath = str((Path(__file__).parent / Path('../models/concat_openvino_2021.4_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

p = dai.Pipeline()
p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)

# Color camera preview already matches the NN input size, so it links directly.
camRgb = p.createColorCamera()
camRgb.setPreviewSize(SHAPE, SHAPE)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

def create_mono(p, socket):
    """Create a MonoCamera on `socket` and return its output resized/converted
    to the SHAPE x SHAPE planar BGR frames the concat model expects."""
    mono = p.create(dai.node.MonoCamera)
    mono.setBoardSocket(socket)
    mono.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)

    # ImageManip resizes the 400P mono stream to the NN input size and
    # changes the frame type (mono -> planar BGR) to match the model input.
    manip = p.create(dai.node.ImageManip)
    manip.initialConfig.setResize(SHAPE, SHAPE)
    manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
    mono.out.link(manip.inputImage)
    return manip.out

# NeuralNetwork node running the custom model that concatenates its 3 inputs
nn = p.createNeuralNetwork()
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)

# Link all three camera streams straight into the NN's named inputs
camRgb.preview.link(nn.inputs['img2'])
create_mono(p, dai.CameraBoardSocket.CAM_B).link(nn.inputs['img1'])
create_mono(p, dai.CameraBoardSocket.CAM_C).link(nn.inputs['img3'])

# Send the concatenated frame from the NN to the host via XLink
nn_xout = p.createXLinkOut()
nn_xout.setStreamName("nn")
nn.out.link(nn_xout.input)

# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    # NN output layout: planar (channels, height, width); the 3 frames are
    # concatenated horizontally, hence width = SHAPE * 3.
    shape = (3, SHAPE, SHAPE * 3)

    while True:
        inNn = np.array(qNn.get().getFirstLayerFp16())
        # FP16 planar output -> uint8 HWC image for OpenCV display
        frame = inNn.reshape(shape).astype(np.uint8).transpose(1, 2, 0)

        cv2.imshow("Concat", frame)

        if cv2.waitKey(1) == ord('q'):
            break

C++

1#include <chrono>
2#include <cstdio>
3#include <iostream>
4
5// Inludes common necessary includes for development using depthai library
6#include "depthai/depthai.hpp"
7#include "utility.hpp"
8
9int main(int argc, char** argv) {
10    using namespace std;
11    // Default blob path provided by Hunter private data download
12    // Applicable for easier example usage only
13    std::string nnPath(BLOB_PATH);
14
15    // If path to blob specified, use that
16    if(argc > 1) {
17        nnPath = std::string(argv[1]);
18    }
19
20    // Print which blob we are using
21    printf("Using blob at path: %s\n", nnPath.c_str());
22
23    // Create pipeline
24    dai::Pipeline pipeline;
25    pipeline.setOpenVINOVersion(dai::OpenVINO::Version::VERSION_2021_4);
26
27    // Define sources and outputs
28    auto camRgb = pipeline.create<dai::node::ColorCamera>();
29    camRgb->setPreviewSize(300, 300);  // NN input
30    camRgb->setInterleaved(false);
31    camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
32
33    auto right = pipeline.create<dai::node::MonoCamera>();
34    right->setCamera("right");
35    right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
36
37    auto manipRight = pipeline.create<dai::node::ImageManip>();
38    manipRight->initialConfig.setResize(300, 300);
39    manipRight->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p);
40    right->out.link(manipRight->inputImage);
41
42    auto left = pipeline.create<dai::node::MonoCamera>();
43    left->setCamera("left");
44    left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
45
46    auto manipLeft = pipeline.create<dai::node::ImageManip>();
47    manipLeft->initialConfig.setResize(300, 300);
48    manipLeft->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p);
49    left->out.link(manipLeft->inputImage);
50
51    auto nn = pipeline.create<dai::node::NeuralNetwork>();
52    nn->setBlobPath(nnPath);
53    nn->setNumInferenceThreads(2);
54
55    manipLeft->out.link(nn->inputs["img1"]);
56    camRgb->preview.link(nn->inputs["img2"]);
57    manipRight->out.link(nn->inputs["img3"]);
58
59    auto xout = pipeline.create<dai::node::XLinkOut>();
60    xout->setStreamName("nn");
61    nn->out.link(xout->input);
62
63    // Connect to device and start pipeline
64    dai::Device device(pipeline);
65
66    // Output queues will be used to get the rgb frames and nn data from the outputs defined above
67    auto qNn = device.getOutputQueue("nn", 4, false);
68
69    while(true) {
70        auto inNn = qNn->get<dai::NNData>();
71        cv::imshow("Concat", fromPlanarFp16(inNn->getFirstLayerFp16(), 900, 300));
72
73        int key = cv::waitKey(1);
74        if(key == 'q' || key == 'Q') {
75            return 0;
76        }
77    }
78    return 0;
79}

Pipeline

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.