# RGBD Point Cloud Visualization

This example demonstrates how to use the [RGBD](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/rgbd.md)
node to generate RGB-D data and visualize it as a 3D point cloud. The core pipeline remains the same, while visualization can be
done in one of three interchangeable ways:

 * Rerun
 * Open3D
 * OAK Visualizer — a native viewer bundled with DepthAI

The only difference between these modes is in how the RGBD and point cloud output is displayed. You can switch the visualizer in
the code as needed.

## Demo

This example requires the DepthAI v3 API, see [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/rgbd.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 5, "node1Output": "out", "node1OutputGroup": "", "node2Id": 7, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 3, "node1Output": "depth", "node1OutputGroup": "", "node2Id": 5, "node2Input": "inDepthSync", "node2InputGroup": "inputs"}, {"node1Id": 2, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 5, "node2Input": "inColorSync", "node2InputGroup": "inputs"}, {"node1Id": 2, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "inputAlignTo", "node2InputGroup": ""}, {"node1Id": 1, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "right", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "left", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[7, {"alias": "", "id": 7, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 32, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_5__out"}}], [5, {"alias": "sync", "id": 5, "ioInfo": [[["", "out"], {"blocking": false, "group": "", "id": 31, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["inputs", "inColorSync"], {"blocking": false, "group": "inputs", "id": 30, "name": "inColorSync", "queueSize": 1, "type": 3, "waitForMessage": false}], [["inputs", "inDepthSync"], {"blocking": false, "group": "inputs", "id": 29, "name": "inDepthSync", "queueSize": 1, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Sync", "parentId": 4, "properties": {"syncAttempts": 
-1, "syncThresholdNs": 10000000}}], [3, {"alias": "", "id": 3, "ioInfo": [[["", "confidenceMap"], {"blocking": false, "group": "", "id": 28, "name": "confidenceMap", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "debugDispCostDump"], {"blocking": false, "group": "", "id": 27, "name": "debugDispCostDump", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "debugExtDispLrCheckIt2"], {"blocking": false, "group": "", "id": 26, "name": "debugExtDispLrCheckIt2", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "debugDispLrCheckIt2"], {"blocking": false, "group": "", "id": 24, "name": "debugDispLrCheckIt2", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "debugExtDispLrCheckIt1"], {"blocking": false, "group": "", "id": 25, "name": "debugExtDispLrCheckIt1", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "debugDispLrCheckIt1"], {"blocking": false, "group": "", "id": 23, "name": "debugDispLrCheckIt1", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "outConfig"], {"blocking": false, "group": "", "id": 22, "name": "outConfig", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "rectifiedRight"], {"blocking": false, "group": "", "id": 21, "name": "rectifiedRight", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "rectifiedLeft"], {"blocking": false, "group": "", "id": 20, "name": "rectifiedLeft", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "depth"], {"blocking": false, "group": "", "id": 16, "name": "depth", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "right"], {"blocking": true, "group": "", "id": 15, "name": "right", "queueSize": 3, "type": 3, "waitForMessage": false}], [["", "left"], {"blocking": true, "group": "", "id": 14, "name": "left", "queueSize": 3, "type": 3, "waitForMessage": false}], [["", "syncedRight"], {"blocking": false, "group": "", "id": 19, "name": "syncedRight", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "syncedLeft"], 
{"blocking": false, "group": "", "id": 18, "name": "syncedLeft", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "inputAlignTo"], {"blocking": false, "group": "", "id": 13, "name": "inputAlignTo", "queueSize": 1, "type": 3, "waitForMessage": true}], [["", "disparity"], {"blocking": false, "group": "", "id": 17, "name": "disparity", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "inputConfig"], {"blocking": true, "group": "", "id": 12, "name": "inputConfig", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "StereoDepth", "parentId": -1, "properties": {"alphaScaling": null, "baseline": null, "depthAlignCamera": -1, "depthAlignmentUseSpecTranslation": null, "disparityToDepthUseSpecTranslation": null, "enableFrameSync": true, "enableRectification": true, "enableRuntimeStereoModeSwitch": false, "focalLength": null, "focalLengthFromCalibration": true, "height": null, "initialConfig": {"algorithmControl": {"centerAlignmentShiftFactor": null, "customDepthUnitMultiplier": 1000.0, "depthAlign": 1, "depthUnit": 2, "disparityShift": 0, "enableExtended": false, "enableLeftRightCheck": true, "enableSubpixel": true, "enableSwLeftRightCheck": false, "leftRightCheckThreshold": 10, "numInvalidateEdgePixels": 0, "subpixelFractionalBits": 3}, "censusTransform": {"enableMeanMode": true, "kernelMask": 0, "kernelSize": -1, "noiseThresholdOffset": 1, "noiseThresholdScale": 1, "threshold": 0}, "confidenceMetrics": {"flatnessConfidenceThreshold": 5, "flatnessConfidenceWeight": 2, "flatnessOverride": false, "motionVectorConfidenceThreshold": 1, "motionVectorConfidenceWeight": 10, "occlusionConfidenceWeight": 20}, "costAggregation": {"divisionFactor": 1, "horizontalPenaltyCostP1": 250, "horizontalPenaltyCostP2": 500, "p1Config": {"defaultValue": 45, "edgeThreshold": 15, "edgeValue": 40, "enableAdaptive": true, "smoothThreshold": 5, "smoothValue": 49}, "p2Config": {"defaultValue": 95, "edgeValue": 90, "enableAdaptive": true, "smoothValue": 
99}, "verticalPenaltyCostP1": 250, "verticalPenaltyCostP2": 500}, "costMatching": {"confidenceThreshold": 15, "disparityWidth": 1, "enableCompanding": false, "enableSwConfidenceThresholding": false, "invalidDisparityValue": 0, "linearEquationParameters": {"alpha": 0, "beta": 2, "threshold": 127}}, "filtersBackend": 2, "postProcessing": {"adaptiveMedianFilter": {"confidenceThreshold": 200, "enable": true}, "bilateralSigmaValue": 0, "brightnessFilter": {"maxBrightness": 256, "minBrightness": 0}, "decimationFilter": {"decimationFactor": 2, "decimationMode": 0}, "filteringOrder": [1, 3, 2, 4, 5], "holeFilling": {"enable": true, "fillConfidenceThreshold": 210, "highConfidenceThreshold": 100, "invalidateDisparities": true, "minValidDisparity": 3}, "median": 7, "spatialFilter": {"alpha": 0.5, "delta": 3, "enable": true, "holeFillingRadius": 1, "numIterations": 1}, "speckleFilter": {"differenceThreshold": 2, "enable": true, "speckleRange": 200}, "temporalFilter": {"alpha": 0.5, "delta": 3, "enable": true, "persistencyMode": 3}, "thresholdFilter": {"maxRange": 10000, "minRange": 0}}}, "mesh": {"meshLeftUri": "", "meshRightUri": "", "meshSize": null, "stepHeight": 16, "stepWidth": 16}, "numFramesPool": 3, "numPostProcessingMemorySlices": 3, "numPostProcessingShaves": 3, "outHeight": null, "outKeepAspectRatio": true, "outWidth": null, "rectificationUseSpecTranslation": null, "rectifyEdgeFillColor": 0, "useHomographyRectification": false, "width": null}}], [2, {"alias": "", "id": 2, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 11, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 10, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 9, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 8, "name": 
"inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 0, "aeRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "antiBandingMode": 0, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 0, "brightness": 0, "captureIntent": 0, "chromaDenoise": 0, "cmdMask": 0, "contrast": 0, "controlMode": 0, "effectMode": 0, "enableHdr": false, "expCompensation": 0, "expManual": {"exposureTimeUs": 0, "frameDurationUs": 0, "sensitivityIso": 0}, "frameSyncMode": 0, "lensPosAutoInfinity": 0, "lensPosAutoMacro": 0, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 0, "sceneMode": 0, "sharpness": 0, "strobeConfig": {"activeLevel": 0, "enable": 0, "gpioNumber": 0}, "strobeTimings": {"durationUs": 0, "exposureBeginOffsetUs": 0, "exposureEndOffsetUs": 0}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": true, "fps": {"value": {"index": 0, "value": 30.0}}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [640, 400]}}, "type": 9}], "resolutionHeight": -1, "resolutionWidth": -1}}], [1, {"alias": "", "id": 1, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 7, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 6, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 5, "name": "mockIsp", "queueSize": 8, "type": 3, 
"waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 4, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 2, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 0, "aeRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "antiBandingMode": 0, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 0, "brightness": 0, "captureIntent": 0, "chromaDenoise": 0, "cmdMask": 0, "contrast": 0, "controlMode": 0, "effectMode": 0, "enableHdr": false, "expCompensation": 0, "expManual": {"exposureTimeUs": 0, "frameDurationUs": 0, "sensitivityIso": 0}, "frameSyncMode": 0, "lensPosAutoInfinity": 0, "lensPosAutoMacro": 0, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 0, "sceneMode": 0, "sharpness": 0, "strobeConfig": {"activeLevel": 0, "enable": 0, "gpioNumber": 0}, "strobeTimings": {"durationUs": 0, "exposureBeginOffsetUs": 0, "exposureEndOffsetUs": 0}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [640, 400]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": 
true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 1, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 0, "aeRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "antiBandingMode": 0, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 0, "brightness": 0, "captureIntent": 0, "chromaDenoise": 0, "cmdMask": 0, "contrast": 0, "controlMode": 0, "effectMode": 0, "enableHdr": false, "expCompensation": 0, "expManual": {"exposureTimeUs": 0, "frameDurationUs": 0, "sensitivityIso": 0}, "frameSyncMode": 0, "lensPosAutoInfinity": 0, "lensPosAutoMacro": 0, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 0, "sceneMode": 0, "sharpness": 0, "strobeConfig": {"activeLevel": 0, "enable": 0, "gpioNumber": 0}, "strobeTimings": {"durationUs": 0, "exposureBeginOffsetUs": 0, "exposureEndOffsetUs": 0}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [640, 400]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```

## Source code with OAK Visualizer in the loop

#### Python

```python
import time
import depthai as dai

from argparse import ArgumentParser

NEURAL_FPS = 8
STEREO_DEFAULT_FPS = 30
TOF_DEFAULT_FPS = 30

# Command-line options: remote-visualizer ports plus the depth backend to use.
parser = ArgumentParser()
parser.add_argument("--webSocketPort", type=int, default=8765)
parser.add_argument("--httpPort", type=int, default=8082)
parser.add_argument("--depthSource", type=str, default="stereo", choices=["stereo", "neural", "tof"])
args = parser.parse_args()

with dai.Pipeline() as pipeline:
    remoteConnector = dai.RemoteConnection(
        webSocketPort=args.webSocketPort, httpPort=args.httpPort
    )

    frame_size = (640, 400)
    # Neural depth runs at a reduced rate; stereo and ToF default to 30 FPS.
    fps = {"neural": NEURAL_FPS, "tof": TOF_DEFAULT_FPS}.get(
        args.depthSource, STEREO_DEFAULT_FPS
    )

    if args.depthSource == "stereo":
        color = pipeline.create(dai.node.Camera).build(sensorFps=fps)
        mono_left = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B, sensorFps=fps)
        mono_right = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=fps)
        depth_node = pipeline.create(dai.node.StereoDepth)
        depth_node.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
        depth_node.setRectifyEdgeFillColor(0)
        depth_node.enableDistortionCorrection(True)
        mono_left.requestOutput(frame_size).link(depth_node.left)
        mono_right.requestOutput(frame_size).link(depth_node.right)
    elif args.depthSource == "neural":
        color = pipeline.create(dai.node.Camera).build(sensorFps=fps)
        mono_left = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B, sensorFps=fps)
        mono_right = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=fps)
        depth_node = pipeline.create(dai.node.NeuralDepth).build(
            mono_left.requestOutput(frame_size),
            mono_right.requestOutput(frame_size),
            dai.DeviceModelZoo.NEURAL_DEPTH_LARGE,
        )
    elif args.depthSource == "tof":
        color = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=fps)
        depth_node = pipeline.create(dai.node.ToF).build(
            dai.CameraBoardSocket.AUTO, dai.ImageFiltersPresetMode.TOF_MID_RANGE
        )
    else:
        # Unreachable in practice: argparse `choices` already restricts the value.
        raise ValueError(f"Invalid depth source: {args.depthSource}")

    # RGBD fuses the color stream with whichever depth producer was selected.
    rgbd = pipeline.create(dai.node.RGBD).build(color, depth_node, frame_size, fps)

    remoteConnector.addTopic("pcl", rgbd.pcl, "common")
    pipeline.start()
    remoteConnector.registerPipeline(pipeline)

    print("Pipeline started with depth source: ", args.depthSource)

    while pipeline.isRunning():
        key = remoteConnector.waitKey(1)
        if key == ord("q"):
            print("Got q key from the remote connection!")
            break
```

#### C++

```cpp
#include <argparse/argparse.hpp>
#include <csignal>
#include <depthai/remote_connection/RemoteConnection.hpp>
#include <iostream>

#include "depthai/depthai.hpp"

constexpr float NEURAL_FPS = 8.0f;  // neural depth runs at a reduced frame rate
constexpr float STEREO_DEFAULT_FPS = 30.0f;

// Signal handling for clean shutdown.
// FIX: the C++ standard only guarantees that `volatile std::sig_atomic_t`
// (or a lock-free std::atomic) may be written from a signal handler; the
// original `static bool` was undefined behavior when modified there.
static volatile std::sig_atomic_t isRunning = 1;
void signalHandler(int signum) {
    (void)signum;  // unused; parameter required by the handler signature
    isRunning = 0;
}

int main(int argc, char** argv) {
    // Initialize argument parser
    argparse::ArgumentParser program("visualizer_rgbd", "1.0.0");
    program.add_description("RGBD point cloud visualizer with configurable depth source");
    program.add_argument("--webSocketPort").default_value(8765).scan<'i', int>().help("WebSocket port for remote connection");
    program.add_argument("--httpPort").default_value(8082).scan<'i', int>().help("HTTP port for remote connection");
    program.add_argument("--depthSource").default_value(std::string("stereo")).help("Depth source: stereo, neural, tof");

    // Parse arguments
    try {
        program.parse_args(argc, argv);
    } catch(const std::runtime_error& err) {
        std::cerr << err.what() << std::endl;
        std::cerr << program;
        return EXIT_FAILURE;
    }

    // Get arguments
    int webSocketPort = program.get<int>("--webSocketPort");
    int httpPort = program.get<int>("--httpPort");
    std::string depthSourceArg = program.get<std::string>("--depthSource");

    // Validate depth source argument manually (argparse has no `choices` here)
    if(depthSourceArg != "stereo" && depthSourceArg != "neural" && depthSourceArg != "tof") {
        std::cerr << "Invalid depth source: " << depthSourceArg << std::endl;
        std::cerr << "Valid options are: stereo, neural, tof" << std::endl;
        return EXIT_FAILURE;
    }

    // Register signal handler so Ctrl+C drops out of the main loop cleanly
    std::signal(SIGINT, signalHandler);

    try {
        // Create RemoteConnection serving the visualizer over WebSocket + HTTP
        dai::RemoteConnection remoteConnector(dai::RemoteConnection::DEFAULT_ADDRESS, webSocketPort, true, httpPort);

        // Create pipeline
        dai::Pipeline pipeline;

        // Neural depth runs at a lower frame rate than plain stereo
        float fps = STEREO_DEFAULT_FPS;
        if(depthSourceArg == "neural") {
            fps = NEURAL_FPS;
        }

        const std::pair<int, int> size = std::make_pair(640, 400);

        // Create color camera (AUTO lets the library pick the RGB socket)
        auto color = pipeline.create<dai::node::Camera>();
        color->build(dai::CameraBoardSocket::AUTO, std::nullopt, fps);

        // Create depth source based on argument.
        // NOTE(review): dai::node::DepthSource appears to be a variant-like
        // handle accepting StereoDepth/NeuralDepth/ToF nodes — confirm against
        // the DepthAI v3 headers.
        dai::node::DepthSource depthSource;

        if(depthSourceArg == "stereo") {
            auto left = pipeline.create<dai::node::Camera>();
            auto right = pipeline.create<dai::node::Camera>();
            auto stereo = pipeline.create<dai::node::StereoDepth>();

            left->build(dai::CameraBoardSocket::CAM_B, std::nullopt, fps);
            right->build(dai::CameraBoardSocket::CAM_C, std::nullopt, fps);

            stereo->setSubpixel(true);
            stereo->setExtendedDisparity(false);
            stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::DEFAULT);
            stereo->setLeftRightCheck(true);
            stereo->setRectifyEdgeFillColor(0);  // black, to better see the cutout
            stereo->enableDistortionCorrection(true);
            stereo->initialConfig->setLeftRightCheckThreshold(10);

            left->requestOutput(size, std::nullopt, dai::ImgResizeMode::CROP)->link(stereo->left);
            right->requestOutput(size, std::nullopt, dai::ImgResizeMode::CROP)->link(stereo->right);

            depthSource = stereo;
        } else if(depthSourceArg == "neural") {
            auto left = pipeline.create<dai::node::Camera>();
            auto right = pipeline.create<dai::node::Camera>();

            left->build(dai::CameraBoardSocket::CAM_B, std::nullopt, fps);
            right->build(dai::CameraBoardSocket::CAM_C, std::nullopt, fps);

            auto neuralDepth = pipeline.create<dai::node::NeuralDepth>();
            neuralDepth->build(*left->requestFullResolutionOutput(), *right->requestFullResolutionOutput(), dai::DeviceModelZoo::NEURAL_DEPTH_LARGE);

            depthSource = neuralDepth;
        } else if(depthSourceArg == "tof") {
            auto tof = pipeline.create<dai::node::ToF>();
            depthSource = tof;
        }

        // Create RGBD node using the unified build method with DepthSource variant
        auto rgbd = pipeline.create<dai::node::RGBD>();
        rgbd->build(color, depthSource, size, fps);

        // Publish the point cloud topic, start, then register with the UI
        remoteConnector.addTopic("pcl", rgbd->pcl);
        pipeline.start();
        remoteConnector.registerPipeline(pipeline);

        // Device handle fetched after start to tune the IR dot projector.
        // NOTE(review): 0.7 is presumably a normalized 0..1 intensity — confirm.
        auto device = pipeline.getDefaultDevice();
        device->setIrLaserDotProjectorIntensity(0.7);

        std::cout << "Pipeline started with depth source: " << depthSourceArg << std::endl;

        // Main loop: poll remote key events until SIGINT or 'q'
        while(isRunning && pipeline.isRunning()) {
            int key = remoteConnector.waitKey(1);
            if(key == 'q') {
                std::cout << "Got 'q' key from the remote connection!" << std::endl;
                break;
            }
        }

        std::cout << "Pipeline stopped." << std::endl;

    } catch(const std::exception& e) {
        std::cerr << "Error: " << e.what() << std::endl;
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
```

## Source code with Open3D in the loop

#### Python

```python
import time
import depthai as dai
import sys
import numpy as np

try:
    import open3d as o3d
except ImportError:
    sys.exit(
        "Critical dependency missing: Open3D. Please install it using the command: '{} -m pip install open3d' and then rerun the script.".format(
            sys.executable
        )
    )

class O3DNode(dai.node.ThreadedHostNode):
    """Host-side node that renders incoming point-cloud messages with Open3D.

    Opens its own visualizer window; releasing the 'Q' key (key code 81)
    closes the viewer loop.
    """

    def __init__(self):
        dai.node.ThreadedHostNode.__init__(self)
        self.inputPCL = self.createInput()
        # Cleared by the key callback to stop the render loop below.
        self._viewerOpen = True

    def run(self):
        def key_callback(vis, action, mods):
            # action == 0 is a key-release event.
            # BUG FIX: the original set a module-level global that nothing
            # ever read, so pressing 'Q' had no effect. Flip an instance
            # flag that the render loop actually checks.
            if action == 0:
                self._viewerOpen = False

        vis = o3d.visualization.VisualizerWithKeyCallback()
        vis.create_window()
        vis.register_key_action_callback(81, key_callback)  # 81 == 'Q'
        pcd = o3d.geometry.PointCloud()
        coordinateFrame = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=1000, origin=[0, 0, 0]
        )
        vis.add_geometry(coordinateFrame)
        first = True
        while self._viewerOpen and self.mainLoop():
            try:
                inPointCloud = self.inputPCL.tryGet()
            except dai.MessageQueue.QueueException:
                break  # Pipeline closed; fall through so the window is destroyed
            if inPointCloud is not None:
                points, colors = inPointCloud.getPointsRGB()
                pcd.points = o3d.utility.Vector3dVector(points.astype(np.float64))
                # Scale 0-255 values to the 0-1 range Open3D expects, then drop
                # the 4th channel (presumably alpha — confirm against getPointsRGB).
                colors = (colors / 255.0).astype(np.float64)
                pcd.colors = o3d.utility.Vector3dVector(np.delete(colors, 3, 1))
                if first:
                    vis.add_geometry(pcd)
                    first = False
                else:
                    vis.update_geometry(pcd)
            vis.poll_events()
            vis.update_renderer()

        vis.destroy_window()

# Create pipeline

with dai.Pipeline() as p:
    fps = 30  # NOTE(review): appears unused — the non-RVC4 branch passes the literal 30
    # Define sources and outputs
    left = p.create(dai.node.Camera)
    right = p.create(dai.node.Camera)
    color = p.create(dai.node.Camera)
    stereo = p.create(dai.node.StereoDepth)
    rgbd = p.create(dai.node.RGBD).build()
    align = None  # only created on RVC4, where explicit depth->color alignment is used
    color.build()
    o3dViewer = p.create(O3DNode)
    left.build(dai.CameraBoardSocket.CAM_B)
    right.build(dai.CameraBoardSocket.CAM_C)
    out = None  # color output stream; assigned in the platform branch below

    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
    stereo.setRectifyEdgeFillColor(0)  # black border after rectification
    stereo.enableDistortionCorrection(True)

    # Linking
    left.requestOutput((640, 400)).link(stereo.left)
    right.requestOutput((640, 400)).link(stereo.right)
    platform = p.getDefaultDevice().getPlatform()
    if platform == dai.Platform.RVC4:
        # RVC4: route depth through an ImageAlign node to match the color
        # stream before it reaches the RGBD node.
        out = color.requestOutput((640, 400), dai.ImgFrame.Type.RGB888i, enableUndistortion=True)
        align = p.create(dai.node.ImageAlign)
        stereo.depth.link(align.input)
        out.link(align.inputAlignTo)
        align.outputAligned.link(rgbd.inDepth)
    else:
        # Other platforms: StereoDepth aligns depth to the color output itself.
        out = color.requestOutput(
            (640, 400), dai.ImgFrame.Type.RGB888i, dai.ImgResizeMode.CROP, 30, True
        )
        stereo.depth.link(rgbd.inDepth)
        out.link(stereo.inputAlignTo)
    out.link(rgbd.inColor)

    rgbd.pcl.link(o3dViewer.inputPCL)

    p.start()
    while p.isRunning():
        time.sleep(1)
```

## Source code with Rerun in the loop

#### Python

```python
import time
import depthai as dai
import sys

from pathlib import Path
installExamplesStr = Path(__file__).absolute().parents[1] / 'install_requirements.py --install_rerun'
try:
    import rerun as rr
except ImportError:
    sys.exit("Critical dependency missing: Rerun. Please install it using the command: '{} {}' and then rerun the script.".format(sys.executable, installExamplesStr))

import cv2

class RerunNode(dai.node.ThreadedHostNode):
    """Streams incoming point-cloud messages to a spawned Rerun viewer."""

    def __init__(self):
        super().__init__()
        # Queue the pipeline links its point-cloud output into.
        self.inputPCL = self.createInput()

    def run(self):
        # One-time scene setup: viewer process, coordinate frame, ground plane.
        rr.init("", spawn=True)
        rr.log("world", rr.ViewCoordinates.RDF)
        rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001]))
        while self.mainLoop():
            try:
                pcl_msg = self.inputPCL.get()
            except dai.MessageQueue.QueueException:
                # Pipeline closed
                return
            if pcl_msg is None:
                continue
            points, colors = pcl_msg.getPointsRGB()
            rr.log("world/pcl", rr.Points3D(points, colors=colors, radii=[0.01]))

# Create pipeline

with dai.Pipeline() as p:
    fps = 30  # NOTE(review): appears unused — the non-RVC4 branch passes the literal 30
    # Define sources and outputs
    left = p.create(dai.node.Camera)
    right = p.create(dai.node.Camera)
    color = p.create(dai.node.Camera)
    stereo = p.create(dai.node.StereoDepth)
    rgbd = p.create(dai.node.RGBD).build()
    align = None  # only created on RVC4, where explicit depth->color alignment is used
    color.build()
    rerunViewer = p.create(RerunNode)
    left.build(dai.CameraBoardSocket.CAM_B)
    right.build(dai.CameraBoardSocket.CAM_C)
    out = None  # color output stream; assigned in the platform branch below

    stereo.setRectifyEdgeFillColor(0)  # black border after rectification
    stereo.enableDistortionCorrection(True)
    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
    # NOTE(review): 10000 is presumably millimeters — confirm units against setDepthUnits(METER) below
    stereo.initialConfig.postProcessing.thresholdFilter.maxRange = 10000
    # Emit point-cloud coordinates in meters for the Rerun scene
    rgbd.setDepthUnits(dai.StereoDepthConfig.AlgorithmControl.DepthUnit.METER)

    # Linking
    left.requestOutput((640, 400)).link(stereo.left)
    right.requestOutput((640, 400)).link(stereo.right)
    platform = p.getDefaultDevice().getPlatform()

    if platform == dai.Platform.RVC4:
        # RVC4: route depth through an ImageAlign node to match the color stream
        out = color.requestOutput((640,400), dai.ImgFrame.Type.RGB888i, enableUndistortion=True)
        align = p.create(dai.node.ImageAlign)
        stereo.depth.link(align.input)
        out.link(align.inputAlignTo)
        align.outputAligned.link(rgbd.inDepth)
    else:
        # Other platforms: StereoDepth aligns depth to the color output itself
        out = color.requestOutput((640,400), dai.ImgFrame.Type.RGB888i, dai.ImgResizeMode.CROP, 30, True)
        stereo.depth.link(rgbd.inDepth)
        out.link(stereo.inputAlignTo)
    out.link(rgbd.inColor)

    rgbd.pcl.link(rerunViewer.inputPCL)

    p.start()
    while p.isRunning():
        time.sleep(1)
```

#### C++

```cpp
#include "depthai/capabilities/ImgFrameCapability.hpp"
#include "depthai/depthai.hpp"
#include "rerun.hpp"
#include "rerun/archetypes/depth_image.hpp"

/// Host-side node that forwards point clouds and color frames to a spawned
/// Rerun viewer.
class RerunNode : public dai::NodeCRTP<dai::node::ThreadedHostNode, RerunNode> {
   public:
    constexpr static const char* NAME = "RerunNode";

   public:
    // Inputs fed by the pipeline: fused point cloud and the synced RGBD pair.
    Input inputPCL{*this, {.name = "inPCL", .types = {{dai::DatatypeEnum::PointCloudData, true}}}};
    Input inputRGBD{*this, {.name = "inRGBD", .types = {{dai::DatatypeEnum::RGBDData, true}}}};

    void run() override {
        const auto rec = rerun::RecordingStream("rerun");
        rec.spawn().exit_on_failure();
        rec.log_static("world", rerun::ViewCoordinates::RDF);
        while(mainLoop()) {
            auto pclIn = inputPCL.get<dai::PointCloudData>();
            auto rgbdIn = inputRGBD.get<dai::RGBDData>();
            if(pclIn == nullptr) continue;
            // FIX: hold the point count by value — the original bound a
            // `const auto&` to the temporary produced by the multiplication.
            const size_t pointCount = static_cast<size_t>(pclIn->getWidth()) * pclIn->getHeight();
            std::vector<rerun::Position3D> points;
            std::vector<rerun::Color> colors;
            points.reserve(pointCount);
            colors.reserve(pointCount);
            const auto& pclData = pclIn->getPointsRGB();
            for(size_t i = 0; i < pointCount; ++i) {
                points.emplace_back(pclData[i].x, pclData[i].y, pclData[i].z);
                colors.emplace_back(pclData[i].r, pclData[i].g, pclData[i].b);
            }
            rec.log("world/obstacle_pcl", rerun::Points3D(points).with_colors(colors).with_radii({0.01f}));
            // FIX: the original dereferenced rgbdIn without a null check.
            if(rgbdIn == nullptr) continue;
            auto rgbFrame = rgbdIn->getRGBFrame();
            if(!rgbFrame.has_value()) continue;
            auto colorFrame = std::get<std::shared_ptr<dai::ImgFrame>>(rgbFrame.value())->getCvFrame();
            // Rerun expects RGB channel order; OpenCV frames are BGR.
            cv::cvtColor(colorFrame, colorFrame, cv::COLOR_BGR2RGB);
            rec.log("rgb",
                    rerun::Image(reinterpret_cast<const uint8_t*>(colorFrame.data),
                                 {static_cast<uint32_t>(colorFrame.cols), static_cast<uint32_t>(colorFrame.rows)},
                                 rerun::datatypes::ColorModel::RGB));
        }
    }
};
int main() {
    using namespace std;
    // Create pipeline
    dai::Pipeline pipeline;
    // Define sources and outputs
    auto left = pipeline.create<dai::node::Camera>();
    auto right = pipeline.create<dai::node::Camera>();
    auto stereo = pipeline.create<dai::node::StereoDepth>();
    auto rgbd = pipeline.create<dai::node::RGBD>()->build();
    auto color = pipeline.create<dai::node::Camera>();
    std::shared_ptr<dai::node::ImageAlign> align;  // only created on RVC4
    auto rerun = pipeline.create<RerunNode>();
    color->build();

    left->build(dai::CameraBoardSocket::CAM_B);
    right->build(dai::CameraBoardSocket::CAM_C);
    stereo->setSubpixel(true);
    stereo->setExtendedDisparity(false);
    stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::DEFAULT);
    stereo->setLeftRightCheck(true);
    stereo->setRectifyEdgeFillColor(0);  // black, to better see the cutout
    stereo->initialConfig->setLeftRightCheckThreshold(10);
    // NOTE(review): 10000 is presumably millimeters — confirm units against setDepthUnit(METER) below
    stereo->initialConfig->postProcessing.thresholdFilter.maxRange = 10000;
    // Emit point-cloud coordinates in meters for the Rerun scene
    rgbd->setDepthUnit(dai::StereoDepthConfig::AlgorithmControl::DepthUnit::METER);

    left->requestOutput(std::pair<int, int>(640, 400))->link(stereo->left);
    right->requestOutput(std::pair<int, int>(640, 400))->link(stereo->right);

    auto platform = pipeline.getDefaultDevice()->getPlatform();
    if(platform == dai::Platform::RVC4) {
        // RVC4: route depth through an ImageAlign node to match the color stream
        auto* out = color->requestOutput(std::pair<int, int>(640, 400), dai::ImgFrame::Type::RGB888i, dai::ImgResizeMode::CROP, std::nullopt, true);
        out->link(rgbd->inColor);
        align = pipeline.create<dai::node::ImageAlign>();
        stereo->depth.link(align->input);
        out->link(align->inputAlignTo);
        align->outputAligned.link(rgbd->inDepth);
    } else {
        // Other platforms: StereoDepth aligns depth to the color output itself
        auto* out = color->requestOutput(std::pair<int, int>(640, 400), dai::ImgFrame::Type::RGB888i, dai::ImgResizeMode::CROP, 30, true);
        out->link(rgbd->inColor);
        out->link(stereo->inputAlignTo);
        stereo->depth.link(rgbd->inDepth);
    }

    // Linking
    rgbd->pcl.link(rerun->inputPCL);
    rgbd->rgbd.link(rerun->inputRGBD);
    pipeline.start();
    // Device handle fetched after start to tune the IR dot projector.
    // NOTE(review): 0.7 is presumably a normalized 0..1 intensity — confirm.
    auto device = pipeline.getDefaultDevice();
    device->setIrLaserDotProjectorIntensity(0.7);
    pipeline.wait();  // block until the pipeline stops
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
