RGBD Point Cloud Visualization

Supported on: RVC2, RVC4
This example demonstrates how to use the RGBD node to generate RGB-D data and visualize it as a 3D point cloud. The core pipeline remains the same, while visualization can be done in one of three interchangeable ways:
  • Rerun
  • Open3D
  • OAK Visualizer - Native viewer bundled with DepthAI
The only difference between these modes is how the RGB-D and point cloud output is displayed; you can switch the visualizer in the code as needed.
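All three variants share the same core: cameras feed a depth source, the RGBD node fuses depth with color, and its pcl output carries the colored point cloud. Stripped of any visualizer, that core looks roughly like the minimal sketch below (stereo depth source assumed; the full, runnable examples follow later on this page):

import depthai as dai

with dai.Pipeline() as p:
    # Color camera plus a stereo pair as the depth source
    color = p.create(dai.node.Camera).build()
    left = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
    right = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)

    stereo = p.create(dai.node.StereoDepth)
    left.requestOutput((640, 400)).link(stereo.left)
    right.requestOutput((640, 400)).link(stereo.right)

    # RGBD fuses color and depth into RGB-D frames and a colored point cloud
    rgbd = p.create(dai.node.RGBD).build(color, stereo, (640, 400), 30)

    pclQueue = rgbd.pcl.createOutputQueue()
    p.start()
    while p.isRunning():
        points, colors = pclQueue.get().getPointsRGB()  # hand off to any visualizer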

Demo

RGBD Point Cloud Visualization with Rerun
This example requires the DepthAI v3 API; see the installation instructions.
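A quick way to confirm that a v3 release is installed (assuming the package is already importable):

import depthai as dai
print(dai.__version__)  # this example expects a 3.x version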

Pipeline

Source code with OAK Visualizer in the loop

Python

import time
import depthai as dai

from argparse import ArgumentParser

NEURAL_FPS = 8
STEREO_DEFAULT_FPS = 30
TOF_DEFAULT_FPS = 30

parser = ArgumentParser()
parser.add_argument("--webSocketPort", type=int, default=8765)
parser.add_argument("--httpPort", type=int, default=8082)
parser.add_argument("--depthSource", type=str, default="stereo", choices=["stereo", "neural", "tof"])
args = parser.parse_args()

with dai.Pipeline() as p:
    remoteConnector = dai.RemoteConnection(
        webSocketPort=args.webSocketPort, httpPort=args.httpPort
    )

    size = (640, 400)
    if args.depthSource == "neural":
        fps = NEURAL_FPS
    elif args.depthSource == "tof":
        fps = TOF_DEFAULT_FPS
    else:
        fps = STEREO_DEFAULT_FPS

    if args.depthSource == "stereo":
        color = p.create(dai.node.Camera).build(sensorFps=fps)
        left = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B, sensorFps=fps)
        right = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=fps)
        depthSource = p.create(dai.node.StereoDepth)
        depthSource.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
        depthSource.setRectifyEdgeFillColor(0)
        depthSource.enableDistortionCorrection(True)
        left.requestOutput(size).link(depthSource.left)
        right.requestOutput(size).link(depthSource.right)
    elif args.depthSource == "neural":
        color = p.create(dai.node.Camera).build(sensorFps=fps)
        left = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B, sensorFps=fps)
        right = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=fps)
        depthSource = p.create(dai.node.NeuralDepth).build(left.requestOutput(size), right.requestOutput(size), dai.DeviceModelZoo.NEURAL_DEPTH_LARGE)
    elif args.depthSource == "tof":
        color = p.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C, sensorFps=fps)
        socket, preset_mode = dai.CameraBoardSocket.AUTO, dai.ImageFiltersPresetMode.TOF_MID_RANGE
        depthSource = p.create(dai.node.ToF).build(socket, preset_mode)
    else:
        raise ValueError(f"Invalid depth source: {args.depthSource}")

    rgbd = p.create(dai.node.RGBD).build(color, depthSource, size, fps)

    remoteConnector.addTopic("pcl", rgbd.pcl, "common")
    p.start()
    remoteConnector.registerPipeline(p)

    print("Pipeline started with depth source: ", args.depthSource)

    while p.isRunning():
        key = remoteConnector.waitKey(1)
        if key == ord("q"):
            print("Got q key from the remote connection!")
            break
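The point cloud reaches the OAK Visualizer through the "pcl" topic registered on the RemoteConnection. Additional streams can be published the same way; as a sketch (assuming the color output can still be requested alongside the RGBD node), the raw color frames could be exposed next to the point cloud before p.start():

# Sketch: also publish the color stream to the OAK Visualizer.
colorOut = color.requestOutput(size)
remoteConnector.addTopic("color", colorOut, "common")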

C++

#include <argparse/argparse.hpp>
#include <csignal>
#include <depthai/remote_connection/RemoteConnection.hpp>
#include <iostream>

#include "depthai/depthai.hpp"

constexpr float NEURAL_FPS = 8.0f;
constexpr float STEREO_DEFAULT_FPS = 30.0f;

// Signal handling for clean shutdown
static bool isRunning = true;
void signalHandler(int signum) {
    (void)signum;
    isRunning = false;
}

int main(int argc, char** argv) {
    // Initialize argument parser
    argparse::ArgumentParser program("visualizer_rgbd", "1.0.0");
    program.add_description("RGBD point cloud visualizer with configurable depth source");
    program.add_argument("--webSocketPort").default_value(8765).scan<'i', int>().help("WebSocket port for remote connection");
    program.add_argument("--httpPort").default_value(8082).scan<'i', int>().help("HTTP port for remote connection");
    program.add_argument("--depthSource").default_value(std::string("stereo")).help("Depth source: stereo, neural, tof");

    // Parse arguments
    try {
        program.parse_args(argc, argv);
    } catch(const std::runtime_error& err) {
        std::cerr << err.what() << std::endl;
        std::cerr << program;
        return EXIT_FAILURE;
    }

    // Get arguments
    int webSocketPort = program.get<int>("--webSocketPort");
    int httpPort = program.get<int>("--httpPort");
    std::string depthSourceArg = program.get<std::string>("--depthSource");

    // Validate depth source argument
    if(depthSourceArg != "stereo" && depthSourceArg != "neural" && depthSourceArg != "tof") {
        std::cerr << "Invalid depth source: " << depthSourceArg << std::endl;
        std::cerr << "Valid options are: stereo, neural, tof" << std::endl;
        return EXIT_FAILURE;
    }

    // Register signal handler
    std::signal(SIGINT, signalHandler);

    try {
        // Create RemoteConnection
        dai::RemoteConnection remoteConnector(dai::RemoteConnection::DEFAULT_ADDRESS, webSocketPort, true, httpPort);

        // Create pipeline
        dai::Pipeline pipeline;

        float fps = STEREO_DEFAULT_FPS;
        if(depthSourceArg == "neural") {
            fps = NEURAL_FPS;
        }

        const std::pair<int, int> size = std::make_pair(640, 400);

        // Create color camera
        auto color = pipeline.create<dai::node::Camera>();
        color->build(dai::CameraBoardSocket::AUTO, std::nullopt, fps);

        // Create depth source based on argument
        dai::node::DepthSource depthSource;

        if(depthSourceArg == "stereo") {
            auto left = pipeline.create<dai::node::Camera>();
            auto right = pipeline.create<dai::node::Camera>();
            auto stereo = pipeline.create<dai::node::StereoDepth>();

            left->build(dai::CameraBoardSocket::CAM_B, std::nullopt, fps);
            right->build(dai::CameraBoardSocket::CAM_C, std::nullopt, fps);

            stereo->setSubpixel(true);
            stereo->setExtendedDisparity(false);
            stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::DEFAULT);
            stereo->setLeftRightCheck(true);
            stereo->setRectifyEdgeFillColor(0);  // black, to better see the cutout
            stereo->enableDistortionCorrection(true);
            stereo->initialConfig->setLeftRightCheckThreshold(10);

            left->requestOutput(size, std::nullopt, dai::ImgResizeMode::CROP)->link(stereo->left);
            right->requestOutput(size, std::nullopt, dai::ImgResizeMode::CROP)->link(stereo->right);

            depthSource = stereo;
        } else if(depthSourceArg == "neural") {
            auto left = pipeline.create<dai::node::Camera>();
            auto right = pipeline.create<dai::node::Camera>();

            left->build(dai::CameraBoardSocket::CAM_B, std::nullopt, fps);
            right->build(dai::CameraBoardSocket::CAM_C, std::nullopt, fps);

            auto neuralDepth = pipeline.create<dai::node::NeuralDepth>();
            neuralDepth->build(*left->requestFullResolutionOutput(), *right->requestFullResolutionOutput(), dai::DeviceModelZoo::NEURAL_DEPTH_LARGE);

            depthSource = neuralDepth;
        } else if(depthSourceArg == "tof") {
            auto tof = pipeline.create<dai::node::ToF>();
            depthSource = tof;
        }

        // Create RGBD node using the unified build method with DepthSource variant
        auto rgbd = pipeline.create<dai::node::RGBD>();
        rgbd->build(color, depthSource, size, fps);

        remoteConnector.addTopic("pcl", rgbd->pcl);
        pipeline.start();
        remoteConnector.registerPipeline(pipeline);

        auto device = pipeline.getDefaultDevice();
        device->setIrLaserDotProjectorIntensity(0.7);

        std::cout << "Pipeline started with depth source: " << depthSourceArg << std::endl;

        // Main loop
        while(isRunning && pipeline.isRunning()) {
            int key = remoteConnector.waitKey(1);
            if(key == 'q') {
                std::cout << "Got 'q' key from the remote connection!" << std::endl;
                break;
            }
        }

        std::cout << "Pipeline stopped." << std::endl;

    } catch(const std::exception& e) {
        std::cerr << "Error: " << e.what() << std::endl;
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

Source code with Open3D in the loop

Python

import time
import depthai as dai
import sys
import numpy as np

try:
    import open3d as o3d
except ImportError:
    sys.exit(
        "Critical dependency missing: Open3D. Please install it using the command: '{} -m pip install open3d' and then rerun the script.".format(
            sys.executable
        )
    )


class O3DNode(dai.node.ThreadedHostNode):
    def __init__(self):
        dai.node.ThreadedHostNode.__init__(self)
        self.inputPCL = self.createInput()

    def run(self):
        def key_callback(vis, action, mods):
            global isRunning
            if action == 0:
                isRunning = False

        vis = o3d.visualization.VisualizerWithKeyCallback()
        vis.create_window()
        vis.register_key_action_callback(81, key_callback)
        pcd = o3d.geometry.PointCloud()
        coordinateFrame = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=1000, origin=[0, 0, 0]
        )
        vis.add_geometry(coordinateFrame)
        first = True
        while self.mainLoop():
            try:
                inPointCloud = self.inputPCL.tryGet()
            except dai.MessageQueue.QueueException:
                return # Pipeline closed
            if inPointCloud is not None:
                points, colors = inPointCloud.getPointsRGB()
                pcd.points = o3d.utility.Vector3dVector(points.astype(np.float64))
                colors = (colors / 255.0).astype(np.float64)
                pcd.colors = o3d.utility.Vector3dVector(np.delete(colors, 3, 1))
                if first:
                    vis.add_geometry(pcd)
                    first = False
                else:
                    vis.update_geometry(pcd)
            vis.poll_events()
            vis.update_renderer()

        vis.destroy_window()


# Create pipeline

with dai.Pipeline() as p:
    fps = 30
    # Define sources and outputs
    left = p.create(dai.node.Camera)
    right = p.create(dai.node.Camera)
    color = p.create(dai.node.Camera)
    stereo = p.create(dai.node.StereoDepth)
    rgbd = p.create(dai.node.RGBD).build()
    align = None
    color.build()
    o3dViewer = p.create(O3DNode)
    left.build(dai.CameraBoardSocket.CAM_B)
    right.build(dai.CameraBoardSocket.CAM_C)
    out = None

    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
    stereo.setRectifyEdgeFillColor(0)
    stereo.enableDistortionCorrection(True)

    # Linking
    left.requestOutput((640, 400)).link(stereo.left)
    right.requestOutput((640, 400)).link(stereo.right)
    platform = p.getDefaultDevice().getPlatform()
    if platform == dai.Platform.RVC4:
        out = color.requestOutput((640, 400), dai.ImgFrame.Type.RGB888i, enableUndistortion=True)
        align = p.create(dai.node.ImageAlign)
        stereo.depth.link(align.input)
        out.link(align.inputAlignTo)
        align.outputAligned.link(rgbd.inDepth)
    else:
        out = color.requestOutput(
            (640, 400), dai.ImgFrame.Type.RGB888i, dai.ImgResizeMode.CROP, 30, True
        )
        stereo.depth.link(rgbd.inDepth)
        out.link(stereo.inputAlignTo)
    out.link(rgbd.inColor)

    rgbd.pcl.link(o3dViewer.inputPCL)

    p.start()
    while p.isRunning():
        time.sleep(1)
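Because the host node keeps an ordinary open3d.geometry.PointCloud, individual frames can also be written to disk. A small sketch (the file name is arbitrary) that could be dropped into the update branch of the loop:

# Sketch: persist the current cloud for offline inspection.
o3d.io.write_point_cloud("pointcloud.ply", pcd)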

Source code with Rerun in the loop

Python

import time
import depthai as dai
import sys

from pathlib import Path
installExamplesStr = Path(__file__).absolute().parents[1] / 'install_requirements.py --install_rerun'
try:
    import rerun as rr
except ImportError:
    sys.exit("Critical dependency missing: Rerun. Please install it using the command: '{} {}' and then rerun the script.".format(sys.executable, installExamplesStr))

import cv2

class RerunNode(dai.node.ThreadedHostNode):
    def __init__(self):
        dai.node.ThreadedHostNode.__init__(self)
        self.inputPCL = self.createInput()


    def run(self):
        rr.init("", spawn=True)
        rr.log("world", rr.ViewCoordinates.RDF)
        rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001]))
        while self.mainLoop():
            try:
                inPointCloud = self.inputPCL.get()
            except dai.MessageQueue.QueueException:
                return # Pipeline closed
            if inPointCloud is not None:
                points, colors = inPointCloud.getPointsRGB()
                rr.log("world/pcl", rr.Points3D(points, colors=colors, radii=[0.01]))

# Create pipeline

with dai.Pipeline() as p:
    fps = 30
    # Define sources and outputs
    left = p.create(dai.node.Camera)
    right = p.create(dai.node.Camera)
    color = p.create(dai.node.Camera)
    stereo = p.create(dai.node.StereoDepth)
    rgbd = p.create(dai.node.RGBD).build()
    align = None
    color.build()
    rerunViewer = p.create(RerunNode)
    left.build(dai.CameraBoardSocket.CAM_B)
    right.build(dai.CameraBoardSocket.CAM_C)
    out = None

    stereo.setRectifyEdgeFillColor(0)
    stereo.enableDistortionCorrection(True)
    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
    stereo.initialConfig.postProcessing.thresholdFilter.maxRange = 10000
    rgbd.setDepthUnits(dai.StereoDepthConfig.AlgorithmControl.DepthUnit.METER)

    # Linking
    left.requestOutput((640, 400)).link(stereo.left)
    right.requestOutput((640, 400)).link(stereo.right)
    platform = p.getDefaultDevice().getPlatform()

    if platform == dai.Platform.RVC4:
        out = color.requestOutput((640,400), dai.ImgFrame.Type.RGB888i, enableUndistortion=True)
        align = p.create(dai.node.ImageAlign)
        stereo.depth.link(align.input)
        out.link(align.inputAlignTo)
        align.outputAligned.link(rgbd.inDepth)
    else:
        out = color.requestOutput((640,400), dai.ImgFrame.Type.RGB888i, dai.ImgResizeMode.CROP, 30, True)
        stereo.depth.link(rgbd.inDepth)
        out.link(stereo.inputAlignTo)
    out.link(rgbd.inColor)

    rgbd.pcl.link(rerunViewer.inputPCL)

    p.start()
    while p.isRunning():
        time.sleep(1)
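Rerun does not have to spawn an interactive viewer; for headless use the stream can be written to an .rrd file and inspected later. A minimal sketch (the application id and file name are arbitrary) would replace the rr.init call in RerunNode.run():

# Sketch: record to a file instead of spawning the viewer window.
rr.init("depthai_rgbd")          # arbitrary application id
rr.save("rgbd_pointcloud.rrd")   # inspect later with the Rerun viewer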

C++

1#include "depthai/capabilities/ImgFrameCapability.hpp"
2#include "depthai/depthai.hpp"
3#include "rerun.hpp"
4#include "rerun/archetypes/depth_image.hpp"
5
6class RerunNode : public dai::NodeCRTP<dai::node::ThreadedHostNode, RerunNode> {
7   public:
8    constexpr static const char* NAME = "RerunNode";
9
10   public:
11    Input inputPCL{*this, {.name = "inPCL", .types = {{dai::DatatypeEnum::PointCloudData, true}}}};
12    Input inputRGBD{*this, {.name = "inRGBD", .types = {{dai::DatatypeEnum::RGBDData, true}}}};
13
14    void run() override {
15        const auto rec = rerun::RecordingStream("rerun");
16        rec.spawn().exit_on_failure();
17        rec.log_static("world", rerun::ViewCoordinates::RDF);
18        while(mainLoop()) {
19            auto pclIn = inputPCL.get<dai::PointCloudData>();
20            auto rgbdIn = inputRGBD.get<dai::RGBDData>();
21            if(pclIn != nullptr) {
22                std::vector<rerun::Position3D> points;
23                std::vector<rerun::Color> colors;
24                const auto& size = pclIn->getWidth() * pclIn->getHeight();
25                points.reserve(size);
26                colors.reserve(size);
27                const auto& pclData = pclIn->getPointsRGB();
28                for(size_t i = 0; i < size; ++i) {
29                    points.emplace_back(pclData[i].x, pclData[i].y, pclData[i].z);
30                    colors.emplace_back(pclData[i].r, pclData[i].g, pclData[i].b);
31                }
32                rec.log("world/obstacle_pcl", rerun::Points3D(points).with_colors(colors).with_radii({0.01f}));
33                auto rgbFrame = rgbdIn->getRGBFrame();
34                if(!rgbFrame.has_value()) continue;
35                auto colorFrame = std::get<std::shared_ptr<dai::ImgFrame>>(rgbFrame.value())->getCvFrame();
36                cv::cvtColor(colorFrame, colorFrame, cv::COLOR_BGR2RGB);
37                rec.log("rgb",
38                        rerun::Image(reinterpret_cast<const uint8_t*>(colorFrame.data),
39                                     {static_cast<uint32_t>(colorFrame.cols), static_cast<uint32_t>(colorFrame.rows)},
40                                     rerun::datatypes::ColorModel::RGB));
41            }
42        }
43    }
44};
45int main() {
46    using namespace std;
47    // Create pipeline
48    dai::Pipeline pipeline;
49    // Define sources and outputs
50    auto left = pipeline.create<dai::node::Camera>();
51    auto right = pipeline.create<dai::node::Camera>();
52    auto stereo = pipeline.create<dai::node::StereoDepth>();
53    auto rgbd = pipeline.create<dai::node::RGBD>()->build();
54    auto color = pipeline.create<dai::node::Camera>();
55    std::shared_ptr<dai::node::ImageAlign> align;
56    auto rerun = pipeline.create<RerunNode>();
57    color->build();
58
59    left->build(dai::CameraBoardSocket::CAM_B);
60    right->build(dai::CameraBoardSocket::CAM_C);
61    stereo->setSubpixel(true);
62    stereo->setExtendedDisparity(false);
63    stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::DEFAULT);
64    stereo->setLeftRightCheck(true);
65    stereo->setRectifyEdgeFillColor(0);  // black, to better see the cutout
66    stereo->enableDistortionCorrection(true);
67    stereo->initialConfig->setLeftRightCheckThreshold(10);
68    stereo->initialConfig->postProcessing.thresholdFilter.maxRange = 10000;
69    rgbd->setDepthUnit(dai::StereoDepthConfig::AlgorithmControl::DepthUnit::METER);
70
71    left->requestOutput(std::pair<int, int>(640, 400))->link(stereo->left);
72    right->requestOutput(std::pair<int, int>(640, 400))->link(stereo->right);
73
74    auto platform = pipeline.getDefaultDevice()->getPlatform();
75    if(platform == dai::Platform::RVC4) {
76        auto* out = color->requestOutput(std::pair<int, int>(640, 400), dai::ImgFrame::Type::RGB888i, dai::ImgResizeMode::CROP, std::nullopt, true);
77        out->link(rgbd->inColor);
78        align = pipeline.create<dai::node::ImageAlign>();
79        stereo->depth.link(align->input);
80        out->link(align->inputAlignTo);
81        align->outputAligned.link(rgbd->inDepth);
82    } else {
83        auto* out = color->requestOutput(std::pair<int, int>(640, 400), dai::ImgFrame::Type::RGB888i, dai::ImgResizeMode::CROP, 30, true);
84        out->link(rgbd->inColor);
85        out->link(stereo->inputAlignTo);
86        stereo->depth.link(rgbd->inDepth);
87    }
88
89    // Linking
90    rgbd->pcl.link(rerun->inputPCL);
91    rgbd->rgbd.link(rerun->inputRGBD);
92    pipeline.start();
93    auto device = pipeline.getDefaultDevice();
94    device->setIrLaserDotProjectorIntensity(0.7);
95    pipeline.wait();
96}

Need assistance?

Head over to the Discussion Forum for technical support or any other questions you might have.