# Neural Depth Minimal

Minimal example showing basic [NeuralDepth](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/neural_depth.md)
usage with disparity output visualization.

## Pipeline

### examples/neural_depth_minimal.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 3, "node1Output": "out", "node1OutputGroup": "", "node2Id": 4, "node2Input": "input", "node2InputGroup": ""}, {"node1Id": 4, "node1Output": "right", "node1OutputGroup": "outputs", "node2Id": 5, "node2Input": "input2", "node2InputGroup": ""}, {"node1Id": 4, "node1Output": "left", "node1OutputGroup": "outputs", "node2Id": 5, "node2Input": "input1", "node2InputGroup": ""}, {"node1Id": 5, "node1Output": "output2", "node1OutputGroup": "", "node2Id": 6, "node2Input": "right", "node2InputGroup": "inputs"}, {"node1Id": 5, "node1Output": "output1", "node1OutputGroup": "", "node2Id": 6, "node2Input": "left", "node2InputGroup": "inputs"}, {"node1Id": 5, "node1Output": "output2", "node1OutputGroup": "", "node2Id": 2, "node2Input": "rightFrameInternal", "node2InputGroup": ""}, {"node1Id": 5, "node1Output": "output1", "node1OutputGroup": "", "node2Id": 2, "node2Input": "leftFrameInternal", "node2InputGroup": ""}, {"node1Id": 6, "node1Output": "out", "node1OutputGroup": "", "node2Id": 2, "node2Input": "nnDataInput", "node2InputGroup": ""}, {"node1Id": 1, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "right", "node2InputGroup": "inputs"}, {"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "left", "node2InputGroup": "inputs"}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "eepromId": 0, "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[6, {"alias": "neuralNetwork", "id": 6, "ioInfo": [[["", "passthrough"], {"blocking": false, "group": "", "id": 32, "name": "passthrough", "queueSize": 8, "type": 0, "waitForMessage": false}], [["inputs", "left"], {"blocking": false, "group": "inputs", "id": 30, "name": "left", "queueSize": 1, "type": 3, 
"waitForMessage": true}], [["", "out"], {"blocking": false, "group": "", "id": 31, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["inputs", "right"], {"blocking": false, "group": "inputs", "id": 29, "name": "right", "queueSize": 1, "type": 3, "waitForMessage": true}], [["", "in"], {"blocking": true, "group": "", "id": 28, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": true}]], "logLevel": 3, "name": "NeuralNetwork", "parentId": 2, "properties": {"backend": "", "backendProperties": {}, "blobSize": null, "blobUri": "", "deviceModel": 0, "modelSource": 0, "modelUri": "", "numFrames": 8, "numNCEPerThread": 0, "numShavesPerThread": 0, "numThreads": 0}}], [5, {"alias": "rectification", "id": 5, "ioInfo": [[["", "output2"], {"blocking": false, "group": "", "id": 27, "name": "output2", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "passthrough2"], {"blocking": false, "group": "", "id": 25, "name": "passthrough2", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "output1"], {"blocking": false, "group": "", "id": 26, "name": "output1", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "input2"], {"blocking": false, "group": "", "id": 23, "name": "input2", "queueSize": 4, "type": 3, "waitForMessage": false}], [["", "passthrough1"], {"blocking": false, "group": "", "id": 24, "name": "passthrough1", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "input1"], {"blocking": false, "group": "", "id": 22, "name": "input1", "queueSize": 4, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Rectification", "parentId": 2, "properties": {"enableRectification": true, "outputHeight": 480, "outputWidth": 768}}], [4, {"alias": "messageDemux", "id": 4, "ioInfo": [[["outputs", "right"], {"blocking": false, "group": "outputs", "id": 20, "name": "right", "queueSize": 8, "type": 0, "waitForMessage": false}], [["outputs", "left"], {"blocking": false, "group": "outputs", "id": 21, "name": "left", 
"queueSize": 8, "type": 0, "waitForMessage": false}], [["", "input"], {"blocking": true, "group": "", "id": 19, "name": "input", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "MessageDemux", "parentId": 2, "properties": {"dummy": 0}}], [3, {"alias": "sync", "id": 3, "ioInfo": [[["", "out"], {"blocking": false, "group": "", "id": 18, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["inputs", "left"], {"blocking": false, "group": "inputs", "id": 17, "name": "left", "queueSize": 10, "type": 3, "waitForMessage": false}], [["inputs", "right"], {"blocking": false, "group": "inputs", "id": 16, "name": "right", "queueSize": 10, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Sync", "parentId": 2, "properties": {"syncAttempts": -1, "syncThresholdNs": 10000000}}], [2, {"alias": "", "id": 2, "ioInfo": [[["", "confidence"], {"blocking": false, "group": "", "id": 15, "name": "confidence", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "edge"], {"blocking": false, "group": "", "id": 14, "name": "edge", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "depth"], {"blocking": false, "group": "", "id": 13, "name": "depth", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "disparity"], {"blocking": false, "group": "", "id": 12, "name": "disparity", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "rightFrameInternal"], {"blocking": false, "group": "", "id": 11, "name": "rightFrameInternal", "queueSize": 1, "type": 3, "waitForMessage": false}], [["", "leftFrameInternal"], {"blocking": false, "group": "", "id": 10, "name": "leftFrameInternal", "queueSize": 1, "type": 3, "waitForMessage": false}], [["", "nnDataInput"], {"blocking": true, "group": "", "id": 9, "name": "nnDataInput", "queueSize": 5, "type": 3, "waitForMessage": false}], [["", "inputConfig"], {"blocking": true, "group": "", "id": 8, "name": "inputConfig", "queueSize": 5, "type": 3, "waitForMessage": false}]], 
"logLevel": 3, "name": "NeuralDepth", "parentId": -1, "properties": {"initialConfig": {"algorithmControl": {"customDepthUnitMultiplier": 1000.0, "depthUnit": 2}, "postProcessing": {"confidenceThreshold": 125, "edgeThreshold": 10, "temporalFilter": {"alpha": 0.4000000059604645, "delta": 3, "enable": false, "persistencyMode": 3}}}}}], [1, {"alias": "", "id": 1, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 7, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 6, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 5, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 4, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 2, "cameraName": "", "fps": 10.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 107739176, "aeRegion": {"height": 49550, "priority": 705, "width": 8433, "x": 12901, "y": 49917}, "afRegion": {"height": 1643, "priority": 24153, "width": 61552, "x": 0, "y": 0}, "antiBandingMode": 224, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 32, "brightness": 0, "captureIntent": 135, "chromaDenoise": 0, "cmdMask": 0, "contrast": 0, "controlMode": 254, "effectMode": 114, "enableHdr": false, "expCompensation": 0, "expManual": {"exposureTimeUs": 1615897884, "frameDurationUs": 0, "sensitivityIso": 0}, "frameSyncMode": 0, "lensPosAutoInfinity": 0, "lensPosAutoMacro": 155, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 64, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 0, "sceneMode": 59, "sharpness": 0, "strobeConfig": {"activeLevel": 0, "enable": 0, "gpioNumber": 0}, "strobeTimings": {"durationUs": 
24153, "exposureBeginOffsetUs": 0, "exposureEndOffsetUs": 107739176}, "wbColorTemp": 0}, "isp3aFps": 0, "maxSizePoolIsp": 10485760, "maxSizePoolOutputs": null, "maxSizePoolRaw": 10485760, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolOutputs": null, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [1280, 800]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1, "sensorType": -1}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 1, "cameraName": "", "fps": 10.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 3395871759, "aeRegion": {"height": 23539, "priority": 1680057611, "width": 27103, "x": 57443, "y": 14509}, "afRegion": {"height": 8229, "priority": 143477788, "width": 58441, "x": 63661, "y": 34201}, "antiBandingMode": 159, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 16, "brightness": -56, "captureIntent": 16, "chromaDenoise": 0, "cmdMask": 0, "contrast": -118, "controlMode": 46, "effectMode": 31, "enableHdr": false, "expCompensation": -85, "expManual": {"exposureTimeUs": 4008679, "frameDurationUs": 2196813195, "sensitivityIso": 238377071}, "frameSyncMode": 243, "lensPosAutoInfinity": 
238, "lensPosAutoMacro": 27, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 66, "lowPowerNumFramesDiscard": 21, "lumaDenoise": 0, "miscControls": [], "saturation": 68, "sceneMode": 246, "sharpness": 141, "strobeConfig": {"activeLevel": 31, "enable": 70, "gpioNumber": -56}, "strobeTimings": {"durationUs": 1359893532, "exposureBeginOffsetUs": -1803871563, "exposureEndOffsetUs": 115413366}, "wbColorTemp": 44429}, "isp3aFps": 0, "maxSizePoolIsp": 10485760, "maxSizePoolOutputs": null, "maxSizePoolRaw": 10485760, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolOutputs": null, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [1280, 800]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1, "sensorType": -1}}]]}}
```

## Source code

### Python

```python
#!/usr/bin/env python3
"""Minimal NeuralDepth example: streams a stereo camera pair through the
NeuralDepth node and visualizes the colorized disparity output."""

import cv2
import depthai as dai
import numpy as np

FPS = 25

# Create pipeline
with dai.Pipeline() as pipeline:
    # Stereo pair: left camera on CAM_B, right camera on CAM_C
    cameraLeft = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B, sensorFps=FPS)
    cameraRight = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=FPS)
    leftOutput = cameraLeft.requestFullResolutionOutput()
    rightOutput = cameraRight.requestFullResolutionOutput()

    neuralDepth = pipeline.create(dai.node.NeuralDepth).build(leftOutput, rightOutput, dai.DeviceModelZoo.NEURAL_DEPTH_192X120)

    disparityQueue = neuralDepth.disparity.createOutputQueue()

    # Connect to device and start pipeline
    pipeline.start()
    # Running maximum used to normalize disparity for display; starts at 1
    # so the division below can never be by zero.
    maxDisparity = 1
    # Pre-build a 256-entry JET lookup table used as a custom color map.
    colorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET)
    colorMap[0] = [0, 0, 0]  # to make zero-disparity pixels black

    while pipeline.isRunning():
        disparityData = disparityQueue.get()
        assert isinstance(disparityData, dai.ImgFrame)
        npDisparity = disparityData.getFrame()
        # Track the largest disparity seen so far so normalization is stable
        maxDisparity = max(maxDisparity, np.max(npDisparity))
        colorizedDisparity = cv2.applyColorMap(((npDisparity / maxDisparity) * 255).astype(np.uint8), colorMap)
        cv2.imshow("disparity", colorizedDisparity)

        # Single keypress poll per frame. The original polled waitKey twice,
        # which could swallow a 'q' press in the redundant second check and
        # exit without calling pipeline.stop().
        if cv2.waitKey(1) == ord('q'):
            pipeline.stop()
            break
```

### C++

```cpp
#include <atomic>
#include <csignal>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>

#include "depthai/depthai.hpp"

// Flag flipped from the signal handler so the main loop can exit gracefully.
std::atomic<bool> quitEvent{false};

// Records the shutdown request; all cleanup happens back in main().
void signalHandler([[maybe_unused]] int signum) {
    quitEvent = true;
}

int main() {
    // Set up signal handlers for clean exit on Ctrl+C
    signal(SIGTERM, signalHandler);
    signal(SIGINT, signalHandler);

    constexpr float FPS = 25.0f;

    // Create the DepthAI pipeline
    dai::Pipeline pipeline;

    // Define camera sources for the stereo pair
    auto cameraLeft = pipeline.create<dai::node::Camera>();
    cameraLeft->build(dai::CameraBoardSocket::CAM_B, std::nullopt, FPS);

    auto cameraRight = pipeline.create<dai::node::Camera>();
    cameraRight->build(dai::CameraBoardSocket::CAM_C, std::nullopt, FPS);

    // Request full resolution output from each camera
    auto* leftOutput = cameraLeft->requestFullResolutionOutput();
    auto* rightOutput = cameraRight->requestFullResolutionOutput();

    // Create and build the NeuralDepth node, linking the camera outputs to it
    auto neuralDepth = pipeline.create<dai::node::NeuralDepth>();
    neuralDepth->build(*leftOutput, *rightOutput, dai::DeviceModelZoo::NEURAL_DEPTH_LARGE);

    // Create an output queue to get the disparity frames from the node
    auto disparityQueue = neuralDepth->disparity.createOutputQueue();

    // Start the pipeline
    pipeline.start();

    // Variables for visualization
    double maxDisparity = 1.0;
    cv::Mat colorMap;

    // Pre-generate the color map for efficiency
    cv::Mat gray(256, 1, CV_8UC1);
    for(int i = 0; i < 256; i++) {
        gray.at<uchar>(i) = i;
    }
    cv::applyColorMap(gray, colorMap, cv::COLORMAP_JET);
    // Set the color for zero-disparity pixels to black, as in the Python example
    colorMap.at<cv::Vec3b>(0) = cv::Vec3b(0, 0, 0);

    while(!quitEvent && pipeline.isRunning()) {
        // Get the disparity data from the queue
        auto disparityData = disparityQueue->get<dai::ImgFrame>();
        cv::Mat npDisparity = disparityData->getFrame();

        // Find the current maximum disparity value to keep the visualization normalized
        double minVal, currentMax;
        cv::minMaxLoc(npDisparity, &minVal, &currentMax);
        if(currentMax > 0) {
            maxDisparity = std::max(maxDisparity, currentMax);
        }

        // Normalize the disparity image to a 0-255 scale for color mapping
        cv::Mat normalized;
        npDisparity.convertTo(normalized, CV_8UC1, 255.0 / maxDisparity);

        // Apply the color map to create a visual representation
        cv::Mat colorizedDisparity;
        cv::applyColorMap(normalized, colorizedDisparity, colorMap);

        // Display the colorized disparity map
        cv::imshow("disparity", colorizedDisparity);

        // Check for keyboard input to quit
        int key = cv::waitKey(1);
        if(key == 'q') {
            break;
        }
    }

    // The pipeline is stopped automatically when the 'pipeline' object goes out of scope
    // at the end of the main function.
    return 0;
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
