DepthAI
Software Stack

ON THIS PAGE

  • Demo
  • Pipeline
  • Source code

Neural Network Multi-input

Supported on: RVC2, RVC4
Utilizes the NeuralNetwork node to run a NN model which concatenates two input images and runs "inference" on the combined image. One of the input images is a static image sent from the host at startup (and it is re-used for every frame); the other one is a live frame from the camera.

Demo

This example requires the DepthAI v3 API, see installation instructions.

Pipeline

Source code

Python

Python
GitHub
#!/usr/bin/env python3
import cv2
import depthai as dai
import numpy as np
from pathlib import Path

# Resolve the static input image relative to this script so the example
# works regardless of the current working directory.
script_dir = Path(__file__).resolve().parent
examplesRoot = (script_dir / Path('../')).resolve()  # parent of the script directory
models = examplesRoot / 'models'
tagImage = models / 'lenna.png'

# Decode the image using OpenCV. cv2.imread signals a missing/unreadable
# file by returning None (no exception), so fail early with a clear message.
lenaImage = cv2.imread(str(tagImage.resolve()))
if lenaImage is None:
    raise FileNotFoundError(f"Could not read image at {tagImage}")
lenaImage = cv2.resize(lenaImage, (256, 256))

device = dai.Device()
platform = device.getPlatform()
# RVC2 expects planar RGB frames, RVC4 expects interleaved RGB frames.
if platform == dai.Platform.RVC2:
    daiType = dai.ImgFrame.Type.RGB888p
elif platform == dai.Platform.RVC4:
    daiType = dai.ImgFrame.Type.RGB888i
else:
    raise RuntimeError("Platform not supported")

daiLenaImage = dai.ImgFrame()
daiLenaImage.setCvFrame(lenaImage, daiType)

with dai.Pipeline(device) as pipeline:
    # Fetch the two-input concatenation model matching the device platform.
    model = dai.NNModelDescription("depthai-test-models/simple-concatenate-model")
    model.platform = platform.name

    nnArchive = dai.NNArchive(dai.getModelFromZoo(model))
    cam = pipeline.create(dai.node.Camera).build()
    camOut = cam.requestOutput((256, 256), daiType)

    neuralNetwork = pipeline.create(dai.node.NeuralNetwork)
    neuralNetwork.setNNArchive(nnArchive)
    camOut.link(neuralNetwork.inputs["image1"])
    lennaInputQueue = neuralNetwork.inputs["image2"].createInputQueue()
    # No need to send the second image every time; the input re-uses the
    # last message it received.
    neuralNetwork.inputs["image2"].setReusePreviousMessage(True)
    qNNData = neuralNetwork.out.createOutputQueue()
    pipeline.start()
    # The static image is sent once; setReusePreviousMessage keeps it alive.
    lennaInputQueue.send(daiLenaImage)
    while pipeline.isRunning():
        inNNData: dai.NNData = qNNData.get()
        tensor: np.ndarray = inNNData.getFirstTensor()
        # Drop the batch dimension and convert to displayable 8-bit values.
        tensor = tensor.squeeze().astype(np.uint8)
        # If the channel axis comes first (CHW), move it last (HWC) for OpenCV.
        if tensor.shape[0] == 3:
            tensor = np.transpose(tensor, (1, 2, 0))
        print(tensor.shape)
        cv2.imshow("Combined", tensor)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break

C++

1#include <iostream>
2#include <opencv2/opencv.hpp>
3#include <xtensor/containers/xarray.hpp>
4
5#include "depthai/depthai.hpp"
6#include "depthai/modelzoo/Zoo.hpp"
7
8int main() {
9    // Decode the image using OpenCV
10    cv::Mat lenaImage = cv::imread(LENNA_PATH);
11    cv::resize(lenaImage, lenaImage, cv::Size(256, 256));
12
13    // Create pipeline
14    dai::Pipeline pipeline;
15
16    // Create model description
17    dai::NNModelDescription model;
18    model.model = "depthai-test-models/simple-concatenate-model";
19    model.platform = pipeline.getDefaultDevice()->getPlatformAsString();
20    dai::NNArchive archive(dai::getModelFromZoo(model));
21
22    dai::ImgFrame::Type daiType;
23    if(pipeline.getDefaultDevice()->getPlatform() == dai::Platform::RVC2) {
24        daiType = dai::ImgFrame::Type::RGB888p;
25    } else {
26        daiType = dai::ImgFrame::Type::RGB888i;
27    }
28
29    // Create and set up nodes
30    auto cam = pipeline.create<dai::node::Camera>()->build();
31    auto camOut = cam->requestOutput(std::make_pair(256, 256), daiType);
32
33    auto neuralNetwork = pipeline.create<dai::node::NeuralNetwork>();
34    neuralNetwork->setNNArchive(archive);
35    camOut->link(neuralNetwork->inputs["image1"]);
36
37    auto lennaInputQueue = neuralNetwork->inputs["image2"].createInputQueue();
38    // No need to send the second image everytime
39    neuralNetwork->inputs["image2"].setReusePreviousMessage(true);
40
41    auto qNNData = neuralNetwork->out.createOutputQueue();
42
43    // Stt pipeline
44    pipeline.start();
45    // Create and set the image frame
46    auto daiLenaImage = std::make_shared<dai::ImgFrame>();
47    daiLenaImage->setCvFrame(lenaImage, daiType);
48    lennaInputQueue->send(daiLenaImage);
49
50    // Main loop
51    while(pipeline.isRunning()) {
52        auto inNNData = qNNData->get<dai::NNData>();
53        auto tensor = inNNData->getFirstTensor<float>();
54        auto tensor_uint8 = xt::eval(xt::squeeze(xt::cast<uint8_t>(tensor), 0));
55
56        cv::Mat output;
57        if(tensor_uint8.shape()[0] == 3) {
58            tensor_uint8 = xt::transpose(tensor_uint8, {1, 2, 0});
59        }
60        output = cv::Mat(tensor_uint8.shape()[0], tensor_uint8.shape()[1], CV_8UC3);
61        std::memcpy(output.data, tensor_uint8.data(), tensor_uint8.size());
62
63        cv::imshow("Combined", output);
64
65        char key = cv::waitKey(1);
66        if(key == 'q') {
67            break;
68        }
69    }
70
71    return 0;
72}

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.