# Neural Network Multi-input Combined

Uses the [NeuralNetwork](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/neural_network.md) node to run an NN
model whose "inference" simply concatenates its two input images into a single combined image.

It constructs the [NNData message](https://docs.luxonis.com/software-v3/depthai/depthai-components/messages/nn_data.md) on the
host with two tensors (one for each input image) and sends the message to the device.
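
For reference, the host-side message looks like this. The snippet below is a minimal sketch assuming an RVC2 device (planar CHW layout, U8F tensors) and uses a blank placeholder frame instead of the Lenna image from the full example further down.

```python
import depthai as dai
import numpy as np

# Placeholder 256x256 RGB frame in planar CHW layout (the RVC2 branch of the example uses channels-first)
frame = np.zeros((3, 256, 256), dtype=np.uint8)

# One NNData message carries both inputs of the concatenation model
nnData = dai.NNData()
nnData.addTensor("image1", frame, dataType=dai.TensorInfo.DataType.U8F)
nnData.addTensor("image2", frame, dataType=dai.TensorInfo.DataType.U8F)
# The message is then sent to the NeuralNetwork node's input queue (see the full source below)
```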

## Demo

This example requires the DepthAI v3 API; see the [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/neural_network_multi_input_combined.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 3, "node1Output": "out", "node1OutputGroup": "", "node2Id": 0, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "out", "node1OutputGroup": "", "node2Id": 4, "node2Input": "in", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[4, {"alias": "", "id": 4, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 4, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_out"}}], [3, {"alias": "", "id": 3, "ioInfo": [[["", "out"], {"blocking": false, "group": "", "id": 3, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkIn", "parentId": -1, "properties": {"maxDataSize": 5242880, "numFrames": 8, "streamName": "__x_0__in"}}], [0, {"alias": "", "id": 0, "ioInfo": [[["", "passthrough"], {"blocking": false, "group": "", "id": 2, "name": "passthrough", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "out"], {"blocking": false, "group": "", "id": 1, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "in"], {"blocking": true, "group": "", "id": 0, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": true}]], "logLevel": 3, "name": "NeuralNetwork", "parentId": -1, "properties": {"backend": "", "backendProperties": {}, "blobSize": 1876, "blobUri": "asset:__blob", "modelSource": 0, "modelUri": "", "numFrames": 8, "numNCEPerThread": 0, "numShavesPerThread": 0, "numThreads": 0}}]]}}
```
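
The serialized graph contains three nodes: an `XLinkIn` node (id 3) that carries the host-built `NNData` message to the device, the `NeuralNetwork` node (id 0) that runs the concatenation model, and an `XLinkOut` node (id 4) that streams the combined result back to the host.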

## Source code

#### Python

```python
#!/usr/bin/env python3
import cv2
import depthai as dai
import numpy as np
from pathlib import Path

# Resolve the path to the example Lenna image relative to this script
script_dir = Path(__file__).resolve().parent
examplesRoot = (script_dir / Path('../')).resolve()
models = examplesRoot / 'models'
lenaPath = models / 'lenna.png'

# Decode the image using OpenCV
lenaImage = cv2.imread(str(lenaPath))
lenaImage = cv2.resize(lenaImage, (256, 256))
lenaImage = cv2.cvtColor(lenaImage, cv2.COLOR_BGR2RGB)
lenaImage = np.array(lenaImage)

device = dai.Device()
platform = device.getPlatform()
if platform == dai.Platform.RVC2:
    # Transpose to planar (CHW) layout and use U8F tensors for RVC2
    lenaImage = np.transpose(lenaImage, (2, 0, 1))
    nnTensorType = dai.TensorInfo.DataType.U8F
elif platform == dai.Platform.RVC4:
    # Add a batch dimension at the front (NHWC) and use FP16 tensors for RVC4
    lenaImage = np.expand_dims(lenaImage, axis=0)
    nnTensorType = dai.TensorInfo.DataType.FP16

inputNNData = dai.NNData()
inputNNData.addTensor("image1", lenaImage, dataType=nnTensorType)
inputNNData.addTensor("image2", lenaImage, dataType=nnTensorType)

with dai.Pipeline(device) as pipeline:
    model = dai.NNModelDescription("depthai-test-models/simple-concatenate-model")
    model.platform = platform.name

    nnArchive = dai.NNArchive(dai.getModelFromZoo(model))

    neuralNetwork = pipeline.create(dai.node.NeuralNetwork)
    neuralNetwork.setNNArchive(nnArchive)
    nnDataInputQueue = neuralNetwork.input.createInputQueue()
    qNNData = neuralNetwork.out.createOutputQueue()
    pipeline.start()
    while pipeline.isRunning():
        nnDataInputQueue.send(inputNNData)
        inNNData: dai.NNData = qNNData.get()
        tensor: np.ndarray = inNNData.getFirstTensor()
        # Drop any singleton batch dimension and convert to uint8
        tensor = tensor.squeeze().astype(np.uint8)
        # If the channel dimension comes first (CHW), move it to the end for display
        if tensor.shape[0] == 3:
            tensor = np.transpose(tensor, (1, 2, 0))
        cv2.imshow("Combined image", tensor)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
```
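
Running the script opens a "Combined image" window showing the model's output (the two identical inputs concatenated); press `q` to quit.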

#### C++

```cpp
#include <iostream>
#include <opencv2/opencv.hpp>
#include <xtensor/containers/xadapt.hpp>
#include <xtensor/containers/xarray.hpp>

#include "depthai/depthai.hpp"
#include "depthai/modelzoo/Zoo.hpp"

int main() {
    // Decode the image using OpenCV
    cv::Mat lenaImageCv = cv::imread(LENNA_PATH);
    cv::resize(lenaImageCv, lenaImageCv, cv::Size(256, 256));
    cv::cvtColor(lenaImageCv, lenaImageCv, cv::COLOR_BGR2RGB);

    // Create an H x W x C xt::xarray from the cv::Mat data
    std::vector<uint8_t> lenaImageData(lenaImageCv.data, lenaImageCv.data + lenaImageCv.total() * lenaImageCv.channels());
    std::vector<std::size_t> lenaShape{static_cast<std::size_t>(lenaImageCv.rows), static_cast<std::size_t>(lenaImageCv.cols), static_cast<std::size_t>(lenaImageCv.channels())};
    xt::xarray<uint8_t> lenaImage = xt::adapt(lenaImageData, lenaShape);

    // Create pipeline
    dai::Pipeline pipeline;

    // Create model description
    dai::NNModelDescription model;
    model.model = "depthai-test-models/simple-concatenate-model";
    model.platform = pipeline.getDefaultDevice()->getPlatformAsString();
    dai::NNArchive archive(dai::getModelFromZoo(model));

    // Create and set up nodes
    auto neuralNetwork = pipeline.create<dai::node::NeuralNetwork>();
    neuralNetwork->setNNArchive(archive);
    auto nnDataInputQueue = neuralNetwork->input.createInputQueue();
    auto qNNData = neuralNetwork->out.createOutputQueue();

    // Prepare input data
    auto inputNNData = std::make_shared<dai::NNData>();
    auto platform = pipeline.getDefaultDevice()->getPlatform();

    if(platform == dai::Platform::RVC2) {
        // Transpose to CHW format
        lenaImage = xt::transpose(lenaImage, {2, 0, 1});
        inputNNData->addTensor("image1", lenaImage, dai::TensorInfo::DataType::U8F);
        inputNNData->addTensor("image2", lenaImage, dai::TensorInfo::DataType::U8F);
    } else {
        // Add empty dimension at front (NHWC format)
        lenaImage = xt::expand_dims(lenaImage, 0);
        inputNNData->addTensor("image1", lenaImage, dai::TensorInfo::DataType::FP16);
        inputNNData->addTensor("image2", lenaImage, dai::TensorInfo::DataType::FP16);
    }

    // Start pipeline
    pipeline.start();

    // Main loop
    while(pipeline.isRunning()) {
        nnDataInputQueue->send(inputNNData);
        auto inNNData = qNNData->get<dai::NNData>();
        auto tensor = inNNData->getFirstTensor<float>();
        // Squeeze out the leading (batch) dimension and cast to uint8 for display
        auto tensor_uint8 = xt::eval(xt::squeeze(xt::cast<uint8_t>(tensor), 0));

        cv::Mat output;
        // If channels come first (CHW), move them to the last dimension for OpenCV
        if(tensor_uint8.shape()[0] == 3) {
            tensor_uint8 = xt::transpose(tensor_uint8, {1, 2, 0});
        }
        output = cv::Mat(tensor_uint8.shape()[0], tensor_uint8.shape()[1], CV_8UC3);
        std::memcpy(output.data, tensor_uint8.data(), tensor_uint8.size());

        cv::imshow("Combined image", output);

        char key = cv::waitKey(1);
        if(key == 'q') {
            break;
        }
    }

    return 0;
}
```
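
Note: `LENNA_PATH` is not defined in the snippet itself; it is assumed to be supplied at build time (for example as a compile definition pointing to `models/lenna.png` in the examples folder).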

### Need assistance?

Head over to the [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
