DepthAI
  • DepthAI Components
    • AprilTags
    • Benchmark
    • Camera
    • Calibration
    • DetectionNetwork
    • Events
    • FeatureTracker
    • Gate
    • HostNodes
    • ImageAlign
    • ImageManip
    • IMU
    • Misc
    • Model Zoo
    • NeuralDepth
    • NeuralNetwork
    • ObjectTracker
    • PointCloud
    • RecordReplay
    • RGBD
    • Script
    • SpatialDetectionNetwork
    • SpatialLocationCalculator
    • StereoDepth
    • Sync
    • VideoEncoder
    • Visualizer
    • Warp
    • RVC2-specific
  • Advanced Tutorials
  • API Reference
  • Tools
Software Stack

ON THIS PAGE

  • Source code

PointCloud Showcase

Supported on: RVC2, RVC4
Demonstrates multiple PointCloud node configurations in a single pipeline:
  • Filtered point cloud — only valid points (z > 0), output in meters.
  • Organized point cloud — all width × height points kept, output in millimeters.
  • Camera-to-camera transform — point cloud transformed into another camera's coordinate system using setTargetCoordinateSystem(CameraBoardSocket).
  • Custom 4×4 transform — arbitrary rotation matrix applied via setTransformationMatrix, with passthrough depth output.
  • Colorized point cloud — aligned RGB input linked to produce colored points.
This example requires the DepthAI v3 API; see the installation instructions.

Source code

Python

Python
GitHub
1#!/usr/bin/env python3
2"""
3PointCloud Node Showcase
4
5Demonstrates filtered/organized output, camera-to-camera transforms,
6housing coordinate system transforms, and custom 4x4 matrix transforms.
7
8See examples/cpp/PointCloud/README.md for full documentation.
9"""
10
11import time
12import depthai as dai
13
14
15# ---------------------------------------------------------------------------
16# Print helpers
17# ---------------------------------------------------------------------------
def printHeader(title: str) -> None:
    """Print *title* inside a fixed-width Unicode box banner.

    The interior field is 44 characters wide, so ASCII titles up to 44
    characters keep the right-hand border aligned.
    """
    top = "\n╔══════════════════════════════════════════════╗"
    body = f"║  {title:<44s}║"
    bottom = "╚══════════════════════════════════════════════╝"
    for row in (top, body, bottom):
        print(row)
23
def printPointCloudInfo(pcd: dai.PointCloudData, frameNum: int) -> None:
    """Print a per-frame summary of a point cloud: point count, frame
    dimensions, organized/color flags, and the axis-aligned bounding box."""
    pts = pcd.getPoints()
    yes_no = lambda flag: "yes" if flag else "no"
    rows = (
        f"\n--- Frame {frameNum} ---",
        f"  Points       : {len(pts)}",
        f"  Width×Height : {pcd.getWidth()} × {pcd.getHeight()}",
        f"  Organized    : {yes_no(pcd.isOrganized())}",
        f"  Color        : {yes_no(pcd.isColor())}",
        "  Bounding box :"
        f"  X [{pcd.getMinX()}, {pcd.getMaxX()}]"
        f"  Y [{pcd.getMinY()}, {pcd.getMaxY()}]"
        f"  Z [{pcd.getMinZ()}, {pcd.getMaxZ()}]",
    )
    for row in rows:
        print(row)
36
# ===========================================================================
# Number of frames collected from every output queue before printing stats.
NUM_FRAMES = 3
39
40
def main() -> None:
    """Run the showcase: build one pipeline containing five differently
    configured PointCloud nodes, collect NUM_FRAMES frames from every
    output queue, then print per-frame statistics for each configuration.

    Requires a connected DepthAI device (RVC2 or RVC4).
    """
    print("PointCloud Node Showcase")
    print("========================")
    print("Connecting to device...")

    device = dai.Device()
    print(f"Device: {device.getDeviceName()}  (ID: {device.getDeviceId()})\n")

    # ------------------------------------------------------------------
    # Single pipeline – shared Camera + StereoDepth, multiple PointCloud
    # nodes configured differently.
    # ------------------------------------------------------------------
    with dai.Pipeline(device) as pipeline:
        left = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
        right = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)
        color = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
        stereo = pipeline.create(dai.node.StereoDepth)
        left.requestFullResolutionOutput().link(stereo.left)
        right.requestFullResolutionOutput().link(stereo.right)

        # ── 1. Filtered point cloud  (METER) ────
        pcSparse = pipeline.create(dai.node.PointCloud)
        pcSparse.setRunOnHost(True)
        pcSparse.initialConfig.setLengthUnit(dai.LengthUnit.METER)
        stereo.depth.link(pcSparse.inputDepth)
        # Non-blocking queue (maxSize=4): presumably drops the oldest frame
        # on overflow instead of stalling the pipeline — confirm in queue docs.
        qSparse = pcSparse.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # ── 2. Organized point cloud  (MILLIMETER) ───────
        pcOrganized = pipeline.create(dai.node.PointCloud)
        pcOrganized.setRunOnHost(True)
        pcOrganized.initialConfig.setLengthUnit(dai.LengthUnit.MILLIMETER)
        pcOrganized.initialConfig.setOrganized(True)
        stereo.depth.link(pcOrganized.inputDepth)
        qOrganized = pcOrganized.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # ── 3. Transform pointcloud into another device's coordinate system ───
        pcCam = pipeline.create(dai.node.PointCloud)
        pcCam.setRunOnHost(True)
        pcCam.initialConfig.setLengthUnit(dai.LengthUnit.MILLIMETER)
        pcCam.initialConfig.setTargetCoordinateSystem(dai.CameraBoardSocket.CAM_A)
        # Or transform to a housing coordinate system instead, e.g.:
        # pcCam.initialConfig.setTargetCoordinateSystem(dai.HousingCoordinateSystem.VESA_A)
        stereo.depth.link(pcCam.inputDepth)
        qCam = pcCam.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # ── 4. Custom 4×4 transform  (90° Z rotation) + passthrough ──────
        pcCustom = pipeline.create(dai.node.PointCloud)
        pcCustom.setRunOnHost(True)
        pcCustom.initialConfig.setLengthUnit(dai.LengthUnit.MILLIMETER)
        # NOTE(review): only this node calls useCPU(); presumably it selects
        # the CPU backend for the host-side node — confirm against the
        # PointCloud node documentation.
        pcCustom.useCPU()
        transform = [
            [0.0, -1.0, 0.0, 0.0],
            [1.0,  0.0, 0.0, 0.0],
            [0.0,  0.0, 1.0, 0.0],
            [0.0,  0.0, 0.0, 1.0],
        ]
        pcCustom.initialConfig.setTransformationMatrix(transform)
        stereo.depth.link(pcCustom.inputDepth)
        qCustom = pcCustom.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)
        qDepth = pcCustom.passthroughDepth.createOutputQueue(maxSize=4, blocking=False)

        # ── 5. Colorized point cloud (aligned RGB from color camera) ─────
        pcColorized = pipeline.create(dai.node.PointCloud)
        pcColorized.setRunOnHost(True)
        pcColorized.initialConfig.setLengthUnit(dai.LengthUnit.METER)
        colorOut = color.requestOutput(
            (640, 400), type=dai.ImgFrame.Type.RGB888i,
            resizeMode=dai.ImgResizeMode.CROP, enableUndistortion=True,
        )
        # RVC4 aligns depth to color with a dedicated ImageAlign node;
        # other platforms use StereoDepth's built-in inputAlignTo.
        platform = pipeline.getDefaultDevice().getPlatform()
        if platform == dai.Platform.RVC4:
            imageAlign = pipeline.create(dai.node.ImageAlign)
            stereo.depth.link(imageAlign.input)
            colorOut.link(imageAlign.inputAlignTo)
            imageAlign.outputAligned.link(pcColorized.inputDepth)
        else:
            colorOut.link(stereo.inputAlignTo)
            stereo.depth.link(pcColorized.inputDepth)
        colorOut.link(pcColorized.inputColor)
        qColorized = pcColorized.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # Note: Housing coordinate system transform is also available, e.g.:
        #   pc.initialConfig.setTargetCoordinateSystem(dai.HousingCoordinateSystem.VESA_A)
        # See the docstring at the top of this file for all available
        # CameraBoardSocket and HousingCoordinateSystem values.

        sparseFrames = []
        organizedFrames = []
        camFrames = []
        customFrames = []
        depthFrames = []
        colorizedFrames = []

        pipeline.start()

        # Wait for auto-exposure to settle and stereo depth to stabilize
        print("Waiting for auto-exposure to settle...")
        time.sleep(1)

        # Drain stale frames that arrived during warm-up
        qSparse.tryGetAll()
        qOrganized.tryGetAll()
        qCam.tryGetAll()
        qCustom.tryGetAll()
        qDepth.tryGetAll()
        qColorized.tryGetAll()

        # Drain every queue once per iteration so no single consumer
        # falls behind and back-pressures the pipeline.
        for _ in range(NUM_FRAMES):
            sparseFrames.append(qSparse.get())
            organizedFrames.append(qOrganized.get())
            camFrames.append(qCam.get())
            customFrames.append(qCustom.get())
            depthFrames.append(qDepth.get())
            colorizedFrames.append(qColorized.get())

    # ------------------------------------------------------------------
    # Display results grouped by feature
    # ------------------------------------------------------------------

    # 1 ── Sparse point cloud
    printHeader("1. Basic sparse point cloud")
    print("  Config: METER")
    for i, pcd in enumerate(sparseFrames):
        printPointCloudInfo(pcd, i)

    # 2 ── Organized point cloud
    printHeader("2. Organized point cloud")
    print("  Config: MILLIMETER, initialConfig.setOrganized(True)")
    for i, pcd in enumerate(organizedFrames):
        printPointCloudInfo(pcd, i)

    # 3 ── Transform pointcloud into another device's coordinate system
    printHeader("3. Camera-to-camera transform")
    print("  Config: setTargetCoordinateSystem(CAM_A)")
    for i, pcd in enumerate(camFrames):
        printPointCloudInfo(pcd, i)

    # 4 ── Custom transform + passthrough depth
    printHeader("4. Custom transform matrix + passthrough")
    print("  Config: 90° Z rotation via initialConfig")
    if len(customFrames) != len(depthFrames):
        raise RuntimeError("customFrames and depthFrames must have the same length")
    for i, (pcd, depth) in enumerate(zip(customFrames, depthFrames)):
        printPointCloudInfo(pcd, i)
        print(f"  Depth frame  : {depth.getWidth()} × {depth.getHeight()}")

    # 5 ── Colorized point cloud
    printHeader("5. Colorized point cloud (RGB)")
    print("  Config: METER, aligned color camera linked to inputColor")
    for i, pcd in enumerate(colorizedFrames):
        printPointCloudInfo(pcd, i)

    print("\nAll demos completed.")


# Standard script entry point.
if __name__ == "__main__":
    main()

C++

1/**
2 * PointCloud Node Showcase
3 *
4 * Demonstrates filtered/organized output, camera-to-camera transforms,
5 * housing coordinate system transforms, and custom 4×4 matrix transforms.
6 *
7 * See README.md in this directory for full documentation.
8 */
9
#include <array>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <vector>

#include "depthai/depthai.hpp"
18
19// ---------------------------------------------------------------------------
20// Print helpers
21// ---------------------------------------------------------------------------
// Render `title` inside a fixed-width Unicode box banner on stdout.
// The interior field is 44 columns, so ASCII titles up to 44 characters
// keep the right-hand border aligned.
static void printHeader(const std::string& title) {
    const char* topEdge = "\n╔══════════════════════════════════════════════╗\n";
    const char* bottomEdge = "╚══════════════════════════════════════════════╝\n";
    std::cout << topEdge << "║  " << std::left << std::setw(44) << title << "║\n" << bottomEdge;
}
27
28static void printPointCloudInfo(const dai::PointCloudData& pcd, int frameNum) {
29    auto points = pcd.getPoints();
30
31    std::cout << "\n--- Frame " << frameNum << " ---\n";
32    std::cout << "  Points       : " << points.size() << "\n";
33    std::cout << "  Width×Height : " << pcd.getWidth() << " × " << pcd.getHeight() << "\n";
34    std::cout << "  Organized    : " << (pcd.isOrganized() ? "yes" : "no") << "\n";
35    std::cout << "  Color        : " << (pcd.isColor() ? "yes" : "no") << "\n";
36    std::cout << "  Bounding box :" << "  X [" << pcd.getMinX() << ", " << pcd.getMaxX() << "]" << "  Y [" << pcd.getMinY() << ", " << pcd.getMaxY() << "]"
37              << "  Z [" << pcd.getMinZ() << ", " << pcd.getMaxZ() << "]\n";
38}
39
// ---------------------------------------------------------------------------
// Main – single pipeline with multiple PointCloud nodes
// ---------------------------------------------------------------------------
// Number of frames collected from every output queue before printing stats.
static constexpr int NUM_FRAMES = 3;
44
// Entry point: builds one pipeline containing five differently-configured
// PointCloud nodes, collects NUM_FRAMES frames from every queue, then prints
// per-frame statistics. Returns 1 if any DepthAI call throws.
int main() {
    std::cout << "PointCloud Node Showcase\n"
              << "========================\n"
              << "Connecting to device...\n";

    auto device = std::make_shared<dai::Device>();
    std::cout << "Device: " << device->getDeviceName() << "  (ID: " << device->getDeviceId() << ")\n";

    try {
        // ==============================================================
        // Single pipeline – shared Camera + StereoDepth, multiple
        // PointCloud nodes configured differently.
        // ==============================================================
        dai::Pipeline pipeline(device);

        auto left = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_B);
        auto right = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_C);
        auto color = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
        auto stereo = pipeline.create<dai::node::StereoDepth>();
        left->requestFullResolutionOutput()->link(stereo->left);
        right->requestFullResolutionOutput()->link(stereo->right);

        // ── 1. Filtered point cloud (METER)
        auto pcSparse = pipeline.create<dai::node::PointCloud>();
        pcSparse->setRunOnHost(true);
        pcSparse->initialConfig->setLengthUnit(dai::LengthUnit::METER);
        stereo->depth.link(pcSparse->inputDepth);
        auto qSparse = pcSparse->outputPointCloud.createOutputQueue();

        // ── 2. Organized point cloud (MILLIMETER)
        auto pcOrganized = pipeline.create<dai::node::PointCloud>();
        pcOrganized->setRunOnHost(true);
        pcOrganized->initialConfig->setLengthUnit(dai::LengthUnit::MILLIMETER);
        pcOrganized->initialConfig->setOrganized(true);
        stereo->depth.link(pcOrganized->inputDepth);
        auto qOrganized = pcOrganized->outputPointCloud.createOutputQueue();

        // ── 3. Transform pointcloud into another camera's coordinate system
        auto pcCam = pipeline.create<dai::node::PointCloud>();
        pcCam->setRunOnHost(true);
        pcCam->initialConfig->setLengthUnit(dai::LengthUnit::MILLIMETER);
        pcCam->initialConfig->setTargetCoordinateSystem(dai::CameraBoardSocket::CAM_A);
        // Or transform to a housing coordinate system instead, e.g.:
        // pcCam->initialConfig->setTargetCoordinateSystem(dai::HousingCoordinateSystem::VESA_A);
        stereo->depth.link(pcCam->inputDepth);
        auto qCam = pcCam->outputPointCloud.createOutputQueue();

        // ── 4. Custom 4×4 transform (90° Z rotation) + passthrough
        auto pcCustom = pipeline.create<dai::node::PointCloud>();
        pcCustom->setRunOnHost(true);
        pcCustom->initialConfig->setLengthUnit(dai::LengthUnit::MILLIMETER);
        // NOTE(review): only this node calls useCPU(); presumably it selects
        // the CPU backend for the host-side node — confirm against docs.
        pcCustom->useCPU();
        std::array<std::array<float, 4>, 4> transform = {{{{0.f, -1.f, 0.f, 0.f}}, {{1.f, 0.f, 0.f, 0.f}}, {{0.f, 0.f, 1.f, 0.f}}, {{0.f, 0.f, 0.f, 1.f}}}};
        pcCustom->initialConfig->setTransformationMatrix(transform);
        stereo->depth.link(pcCustom->inputDepth);
        auto qCustom = pcCustom->outputPointCloud.createOutputQueue();
        auto qDepth = pcCustom->passthroughDepth.createOutputQueue();

        // ── 5. Colorized point cloud (aligned RGB from color camera)
        auto pcColor = pipeline.create<dai::node::PointCloud>();
        pcColor->setRunOnHost(true);
        pcColor->initialConfig->setLengthUnit(dai::LengthUnit::METER);
        auto* colorOut = color->requestOutput(std::make_pair(640, 400), dai::ImgFrame::Type::RGB888i, dai::ImgResizeMode::CROP, std::nullopt, true);
        // RVC4 aligns depth to color with a dedicated ImageAlign node;
        // other platforms use StereoDepth's built-in inputAlignTo.
        auto platform = device->getPlatform();
        if(platform == dai::Platform::RVC4) {
            auto imageAlign = pipeline.create<dai::node::ImageAlign>();
            stereo->depth.link(imageAlign->input);
            colorOut->link(imageAlign->inputAlignTo);
            imageAlign->outputAligned.link(pcColor->inputDepth);
        } else {
            colorOut->link(stereo->inputAlignTo);
            stereo->depth.link(pcColor->inputDepth);
        }
        colorOut->link(pcColor->getColorInput());
        auto qColor = pcColor->outputPointCloud.createOutputQueue();

        // ==============================================================
        // Collect frames – drain all queues evenly to avoid back-pressure
        // ==============================================================
        struct TestCase {
            std::shared_ptr<dai::MessageQueue> queue;  // source queue to drain
            std::string title;                         // section header (also used to detect the "Custom" case below)
            std::string config;                        // human-readable config summary
            std::vector<std::shared_ptr<dai::PointCloudData>> frames;  // collected frames
        };

        std::vector<TestCase> testCases = {
            {qSparse, "1. Basic sparse point cloud", "METER", {}},
            {qOrganized, "2. Organized point cloud", "MILLIMETER, initialConfig->setOrganized(true)", {}},
            {qCam, "3. Camera-to-camera transform", "setTargetCoordinateSystem(CAM_A)", {}},
            {qCustom, "4. Custom transform matrix + passthrough", "90° Z rotation via initialConfig", {}},
            {qColor, "5. Colorized point cloud (RGB)", "METER, aligned color camera linked to inputColor", {}},
        };

        std::vector<std::shared_ptr<dai::ImgFrame>> depthFrames;

        pipeline.start();

        // Wait for auto-exposure to settle and stereo depth to stabilize
        std::cout << "Waiting for auto-exposure to settle...\n";
        std::this_thread::sleep_for(std::chrono::seconds(1));

        // Drain stale frames that arrived during warm-up
        for(auto& tc : testCases) {
            tc.queue->tryGetAll<dai::PointCloudData>();
        }
        qDepth->tryGetAll<dai::ImgFrame>();

        // Take one frame from every queue per iteration so no single
        // consumer falls behind and back-pressures the pipeline.
        for(int i = 0; i < NUM_FRAMES; ++i) {
            for(auto& tc : testCases) {
                tc.frames.push_back(tc.queue->get<dai::PointCloudData>());
            }
            depthFrames.push_back(qDepth->get<dai::ImgFrame>());
        }
        pipeline.stop();

        // ==============================================================
        // Display results grouped by feature
        // ==============================================================
        for(const auto& tc : testCases) {
            printHeader(tc.title);
            std::cout << "  Config: " << tc.config << "\n";
            for(int i = 0; i < NUM_FRAMES; ++i) {
                printPointCloudInfo(*tc.frames[i], i);
                // Show depth passthrough info for the custom transform case
                // (matched by title substring — keep titles in sync with this check)
                if(tc.title.find("Custom") != std::string::npos) {
                    std::cout << "  Depth frame  : " << depthFrames[i]->getWidth() << " × " << depthFrames[i]->getHeight() << "\n";
                }
            }
        }

    } catch(const std::exception& e) {
        std::cerr << "\nError: " << e.what() << std::endl;
        return 1;
    }

    std::cout << "\nAll demos completed.\n";
    return 0;
}

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.