# PointCloud Showcase

Demonstrates multiple [PointCloud](https://docs.luxonis.com/software-v3/depthai/depthai-components/host_nodes/pointcloud.md) node
configurations in a single pipeline:

 * Filtered point cloud — only valid points (z > 0), output in meters.
 * Organized point cloud — all width × height points kept, output in millimeters.
 * Camera-to-camera transform — point cloud transformed into another camera's coordinate system using
   setTargetCoordinateSystem(CameraBoardSocket).
 * Custom 4×4 transform — arbitrary rotation matrix applied via setTransformationMatrix, with passthrough depth output.
 * Colorized point cloud — aligned RGB input linked to produce colored points.

This example requires the DepthAI v3 API, see [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Source code

#### Python

```python
#!/usr/bin/env python3
"""
PointCloud Node Showcase

Demonstrates filtered/organized output, camera-to-camera transforms,
housing coordinate system transforms, and custom 4x4 matrix transforms.

See examples/cpp/PointCloud/README.md for full documentation.
"""

import time
import depthai as dai

# ---------------------------------------------------------------------------
# Print helpers
# ---------------------------------------------------------------------------
def printHeader(title: str) -> None:
    """Print `title` inside a fixed-width box (interior padded to 44 chars)."""
    padded = f"{title:<44s}"
    print("\n╔══════════════════════════════════════════════╗")
    print("║  " + padded + "║")
    print("╚══════════════════════════════════════════════╝")

def printPointCloudInfo(pcd: dai.PointCloudData, frameNum: int) -> None:
    """Print summary stats for one frame: point count, dimensions, flags,
    and the axis-aligned bounding box of the cloud."""
    pts = pcd.getPoints()
    organized = "yes" if pcd.isOrganized() else "no"
    colored = "yes" if pcd.isColor() else "no"
    print(f"\n--- Frame {frameNum} ---")
    print(f"  Points       : {len(pts)}")
    print(f"  Width×Height : {pcd.getWidth()} × {pcd.getHeight()}")
    print(f"  Organized    : {organized}")
    print(f"  Color        : {colored}")
    print(
        f"  Bounding box :"
        f"  X [{pcd.getMinX()}, {pcd.getMaxX()}]"
        f"  Y [{pcd.getMinY()}, {pcd.getMaxY()}]"
        f"  Z [{pcd.getMinZ()}, {pcd.getMaxZ()}]"
    )

# ===========================================================================
NUM_FRAMES = 3  # frames captured from each output queue before results are printed

def main() -> None:
    """Build one pipeline containing five differently-configured PointCloud
    nodes (all sharing the same Camera + StereoDepth front end), capture
    NUM_FRAMES frames from each, then print per-frame statistics grouped
    by feature.
    """
    print("PointCloud Node Showcase")
    print("========================")
    print("Connecting to device...")

    device = dai.Device()
    print(f"Device: {device.getDeviceName()}  (ID: {device.getDeviceId()})\n")

    # ------------------------------------------------------------------
    # Single pipeline – shared Camera + StereoDepth, multiple PointCloud
    # nodes configured differently.
    # ------------------------------------------------------------------
    with dai.Pipeline(device) as pipeline:
        left = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B)
        right = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C)
        color = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
        stereo = pipeline.create(dai.node.StereoDepth)
        left.requestFullResolutionOutput().link(stereo.left)
        right.requestFullResolutionOutput().link(stereo.right)

        # ── 1. Filtered point cloud  (METER) ────
        pcSparse = pipeline.create(dai.node.PointCloud)
        pcSparse.setRunOnHost(True)
        pcSparse.initialConfig.setLengthUnit(dai.LengthUnit.METER)
        stereo.depth.link(pcSparse.inputDepth)
        qSparse = pcSparse.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # ── 2. Organized point cloud  (MILLIMETER) ───────
        pcOrganized = pipeline.create(dai.node.PointCloud)
        pcOrganized.setRunOnHost(True)
        pcOrganized.initialConfig.setLengthUnit(dai.LengthUnit.MILLIMETER)
        pcOrganized.initialConfig.setOrganized(True)
        stereo.depth.link(pcOrganized.inputDepth)
        qOrganized = pcOrganized.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # ── 3. Transform point cloud into another camera's coordinate system ───
        pcCam = pipeline.create(dai.node.PointCloud)
        pcCam.setRunOnHost(True)
        pcCam.initialConfig.setLengthUnit(dai.LengthUnit.MILLIMETER)
        pcCam.initialConfig.setTargetCoordinateSystem(dai.CameraBoardSocket.CAM_A)
        # Or transform to a housing coordinate system instead, e.g.:
        # pcCam.initialConfig.setTargetCoordinateSystem(dai.HousingCoordinateSystem.VESA_A)
        stereo.depth.link(pcCam.inputDepth)
        qCam = pcCam.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # ── 4. Custom 4×4 transform  (90° Z rotation) + passthrough ──────
        pcCustom = pipeline.create(dai.node.PointCloud)
        pcCustom.setRunOnHost(True)
        pcCustom.initialConfig.setLengthUnit(dai.LengthUnit.MILLIMETER)
        pcCustom.useCPU()  # NOTE(review): presumably selects the host CPU backend — confirm in PointCloud docs
        # 4×4 homogeneous transform: 90° rotation about the Z axis.
        transform = [
            [0.0, -1.0, 0.0, 0.0],
            [1.0,  0.0, 0.0, 0.0],
            [0.0,  0.0, 1.0, 0.0],
            [0.0,  0.0, 0.0, 1.0],
        ]
        pcCustom.initialConfig.setTransformationMatrix(transform)
        stereo.depth.link(pcCustom.inputDepth)
        qCustom = pcCustom.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)
        qDepth = pcCustom.passthroughDepth.createOutputQueue(maxSize=4, blocking=False)

        # ── 5. Colorized point cloud (aligned RGB from color camera) ─────
        pcColorized = pipeline.create(dai.node.PointCloud)
        pcColorized.setRunOnHost(True)
        pcColorized.initialConfig.setLengthUnit(dai.LengthUnit.METER)
        colorOut = color.requestOutput(
            (640, 400), type=dai.ImgFrame.Type.RGB888i,
            resizeMode=dai.ImgResizeMode.CROP, enableUndistortion=True,
        )
        # On RVC4 depth is aligned to the color stream by a dedicated
        # ImageAlign node; other platforms align inside StereoDepth via
        # its inputAlignTo input.
        platform = pipeline.getDefaultDevice().getPlatform()
        if platform == dai.Platform.RVC4:
            imageAlign = pipeline.create(dai.node.ImageAlign)
            stereo.depth.link(imageAlign.input)
            colorOut.link(imageAlign.inputAlignTo)
            imageAlign.outputAligned.link(pcColorized.inputDepth)
        else:
            colorOut.link(stereo.inputAlignTo)
            stereo.depth.link(pcColorized.inputDepth)
        colorOut.link(pcColorized.inputColor)
        qColorized = pcColorized.outputPointCloud.createOutputQueue(maxSize=4, blocking=False)

        # Note: Housing coordinate system transform is also available, e.g.:
        #   pc.initialConfig.setTargetCoordinateSystem(dai.HousingCoordinateSystem.VESA_A)
        # See the docstring at the top of this file for all available
        # CameraBoardSocket and HousingCoordinateSystem values.

        sparseFrames = []
        organizedFrames = []
        camFrames = []
        customFrames = []
        depthFrames = []
        colorizedFrames = []

        pipeline.start()

        # Wait for auto-exposure to settle and stereo depth to stabilize
        print("Waiting for auto-exposure to settle...")
        time.sleep(1)

        # Drain stale frames that arrived during warm-up
        qSparse.tryGetAll()
        qOrganized.tryGetAll()
        qCam.tryGetAll()
        qCustom.tryGetAll()
        qDepth.tryGetAll()
        qColorized.tryGetAll()

        # Blocking get() on every queue each iteration keeps the six
        # collections in lock-step (NUM_FRAMES entries each).
        for _ in range(NUM_FRAMES):
            sparseFrames.append(qSparse.get())
            organizedFrames.append(qOrganized.get())
            camFrames.append(qCam.get())
            customFrames.append(qCustom.get())
            depthFrames.append(qDepth.get())
            colorizedFrames.append(qColorized.get())

    # ------------------------------------------------------------------
    # Display results grouped by feature
    # ------------------------------------------------------------------

    # 1 ── Sparse point cloud
    printHeader("1. Basic sparse point cloud")
    print("  Config: METER")
    for i, pcd in enumerate(sparseFrames):
        printPointCloudInfo(pcd, i)

    # 2 ── Organized point cloud
    printHeader("2. Organized point cloud")
    print("  Config: MILLIMETER, initialConfig.setOrganized(True)")
    for i, pcd in enumerate(organizedFrames):
        printPointCloudInfo(pcd, i)

    # 3 ── Transform point cloud into another camera's coordinate system
    printHeader("3. Camera-to-camera transform")
    print("  Config: setTargetCoordinateSystem(CAM_A)")
    for i, pcd in enumerate(camFrames):
        printPointCloudInfo(pcd, i)

    # 4 ── Custom transform + passthrough depth
    printHeader("4. Custom transform matrix + passthrough")
    print("  Config: 90° Z rotation via initialConfig")
    if len(customFrames) != len(depthFrames):
        raise RuntimeError("customFrames and depthFrames must have the same length")
    for i, (pcd, depth) in enumerate(zip(customFrames, depthFrames)):
        printPointCloudInfo(pcd, i)
        print(f"  Depth frame  : {depth.getWidth()} × {depth.getHeight()}")

    # 5 ── Colorized point cloud
    printHeader("5. Colorized point cloud (RGB)")
    print("  Config: METER, aligned color camera linked to inputColor")
    for i, pcd in enumerate(colorizedFrames):
        printPointCloudInfo(pcd, i)

    print("\nAll demos completed.")

# Script entry point.
if __name__ == "__main__":
    main()
```

#### C++

```cpp
/**
 * PointCloud Node Showcase
 *
 * Demonstrates filtered/organized output, camera-to-camera transforms,
 * housing coordinate system transforms, and custom 4×4 matrix transforms.
 *
 * See README.md in this directory for full documentation.
 */

#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

#include "depthai/depthai.hpp"

// ---------------------------------------------------------------------------
// Print helpers
// ---------------------------------------------------------------------------
static void printHeader(const std::string& title) {
    // Render a three-line box around the title; the interior is padded to
    // 44 characters after the two leading spaces.
    const std::string top = "\n╔══════════════════════════════════════════════╗\n";
    const std::string bottom = "╚══════════════════════════════════════════════╝\n";
    std::cout << top;
    std::cout << "║  " << std::left << std::setw(44) << title << "║\n";
    std::cout << bottom;
}

static void printPointCloudInfo(const dai::PointCloudData& pcd, int frameNum) {
    // One frame's summary: point count, dimensions, flags and the
    // axis-aligned bounding box of the cloud.
    const auto pts = pcd.getPoints();
    const char* organized = pcd.isOrganized() ? "yes" : "no";
    const char* colored = pcd.isColor() ? "yes" : "no";

    std::cout << "\n--- Frame " << frameNum << " ---\n";
    std::cout << "  Points       : " << pts.size() << "\n";
    std::cout << "  Width×Height : " << pcd.getWidth() << " × " << pcd.getHeight() << "\n";
    std::cout << "  Organized    : " << organized << "\n";
    std::cout << "  Color        : " << colored << "\n";
    std::cout << "  Bounding box :"
              << "  X [" << pcd.getMinX() << ", " << pcd.getMaxX() << "]"
              << "  Y [" << pcd.getMinY() << ", " << pcd.getMaxY() << "]"
              << "  Z [" << pcd.getMinZ() << ", " << pcd.getMaxZ() << "]\n";
}

// ---------------------------------------------------------------------------
// Main – single pipeline with multiple PointCloud nodes
// ---------------------------------------------------------------------------
static constexpr int NUM_FRAMES = 3;  // frames collected from each queue before results are printed

int main() {
    std::cout << "PointCloud Node Showcase\n"
              << "========================\n"
              << "Connecting to device...\n";

    auto device = std::make_shared<dai::Device>();
    std::cout << "Device: " << device->getDeviceName() << "  (ID: " << device->getDeviceId() << ")\n";

    try {
        // ==============================================================
        // Single pipeline – shared Camera + StereoDepth, multiple
        // PointCloud nodes configured differently.
        // ==============================================================
        dai::Pipeline pipeline(device);

        auto left = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_B);
        auto right = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_C);
        auto color = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
        auto stereo = pipeline.create<dai::node::StereoDepth>();
        left->requestFullResolutionOutput()->link(stereo->left);
        right->requestFullResolutionOutput()->link(stereo->right);

        // ── 1. Filtered point cloud (METER)
        auto pcSparse = pipeline.create<dai::node::PointCloud>();
        pcSparse->setRunOnHost(true);
        pcSparse->initialConfig->setLengthUnit(dai::LengthUnit::METER);
        stereo->depth.link(pcSparse->inputDepth);
        auto qSparse = pcSparse->outputPointCloud.createOutputQueue();

        // ── 2. Organized point cloud (MILLIMETER)
        auto pcOrganized = pipeline.create<dai::node::PointCloud>();
        pcOrganized->setRunOnHost(true);
        pcOrganized->initialConfig->setLengthUnit(dai::LengthUnit::MILLIMETER);
        pcOrganized->initialConfig->setOrganized(true);
        stereo->depth.link(pcOrganized->inputDepth);
        auto qOrganized = pcOrganized->outputPointCloud.createOutputQueue();

        // ── 3. Transform pointcloud into another camera's coordinate system
        auto pcCam = pipeline.create<dai::node::PointCloud>();
        pcCam->setRunOnHost(true);
        pcCam->initialConfig->setLengthUnit(dai::LengthUnit::MILLIMETER);
        pcCam->initialConfig->setTargetCoordinateSystem(dai::CameraBoardSocket::CAM_A);
        // Or transform to a housing coordinate system instead, e.g.:
        // pcCam->initialConfig->setTargetCoordinateSystem(dai::HousingCoordinateSystem::VESA_A);
        stereo->depth.link(pcCam->inputDepth);
        auto qCam = pcCam->outputPointCloud.createOutputQueue();

        // ── 4. Custom 4×4 transform (90° Z rotation) + passthrough
        auto pcCustom = pipeline.create<dai::node::PointCloud>();
        pcCustom->setRunOnHost(true);
        pcCustom->initialConfig->setLengthUnit(dai::LengthUnit::MILLIMETER);
        pcCustom->useCPU();  // NOTE(review): presumably selects the host CPU backend — confirm in PointCloud docs
        // 4×4 homogeneous transform: 90° rotation about the Z axis.
        std::array<std::array<float, 4>, 4> transform = {{{{0.f, -1.f, 0.f, 0.f}}, {{1.f, 0.f, 0.f, 0.f}}, {{0.f, 0.f, 1.f, 0.f}}, {{0.f, 0.f, 0.f, 1.f}}}};
        pcCustom->initialConfig->setTransformationMatrix(transform);
        stereo->depth.link(pcCustom->inputDepth);
        auto qCustom = pcCustom->outputPointCloud.createOutputQueue();
        auto qDepth = pcCustom->passthroughDepth.createOutputQueue();

        // ── 5. Colorized point cloud (aligned RGB from color camera)
        auto pcColor = pipeline.create<dai::node::PointCloud>();
        pcColor->setRunOnHost(true);
        pcColor->initialConfig->setLengthUnit(dai::LengthUnit::METER);
        // 640×400 RGB output with CROP resize; the trailing `true` enables
        // undistortion (mirrors enableUndistortion=True in the Python example).
        auto* colorOut = color->requestOutput(std::make_pair(640, 400), dai::ImgFrame::Type::RGB888i, dai::ImgResizeMode::CROP, std::nullopt, true);
        // On RVC4 depth is aligned to the color stream by a dedicated ImageAlign
        // node; other platforms align inside StereoDepth via its inputAlignTo input.
        auto platform = device->getPlatform();
        if(platform == dai::Platform::RVC4) {
            auto imageAlign = pipeline.create<dai::node::ImageAlign>();
            stereo->depth.link(imageAlign->input);
            colorOut->link(imageAlign->inputAlignTo);
            imageAlign->outputAligned.link(pcColor->inputDepth);
        } else {
            colorOut->link(stereo->inputAlignTo);
            stereo->depth.link(pcColor->inputDepth);
        }
        colorOut->link(pcColor->getColorInput());
        auto qColor = pcColor->outputPointCloud.createOutputQueue();

        // ==============================================================
        // Collect frames – drain all queues evenly to avoid back-pressure
        // ==============================================================
        // One PointCloud output under test: its queue, the strings shown in
        // the report, and the frames collected for it.
        struct TestCase {
            std::shared_ptr<dai::MessageQueue> queue;                   // output queue to drain
            std::string title;                                          // section header in the report
            std::string config;                                         // human-readable config summary
            std::vector<std::shared_ptr<dai::PointCloudData>> frames;   // collected frames (NUM_FRAMES)
        };

        std::vector<TestCase> testCases = {
            {qSparse, "1. Basic sparse point cloud", "METER", {}},
            {qOrganized, "2. Organized point cloud", "MILLIMETER, initialConfig->setOrganized(true)", {}},
            {qCam, "3. Camera-to-camera transform", "setTargetCoordinateSystem(CAM_A)", {}},
            {qCustom, "4. Custom transform matrix + passthrough", "90° Z rotation via initialConfig", {}},
            {qColor, "5. Colorized point cloud (RGB)", "METER, aligned color camera linked to inputColor", {}},
        };

        std::vector<std::shared_ptr<dai::ImgFrame>> depthFrames;

        pipeline.start();

        // Wait for auto-exposure to settle and stereo depth to stabilize
        std::cout << "Waiting for auto-exposure to settle...\n";
        std::this_thread::sleep_for(std::chrono::seconds(1));

        // Drain stale frames that arrived during warm-up
        for(auto& tc : testCases) {
            tc.queue->tryGetAll<dai::PointCloudData>();
        }
        qDepth->tryGetAll<dai::ImgFrame>();

        // Blocking get() on every queue each iteration keeps all
        // collections in lock-step (NUM_FRAMES entries each).
        for(int i = 0; i < NUM_FRAMES; ++i) {
            for(auto& tc : testCases) {
                tc.frames.push_back(tc.queue->get<dai::PointCloudData>());
            }
            depthFrames.push_back(qDepth->get<dai::ImgFrame>());
        }
        pipeline.stop();

        // ==============================================================
        // Display results grouped by feature
        // ==============================================================
        for(const auto& tc : testCases) {
            printHeader(tc.title);
            std::cout << "  Config: " << tc.config << "\n";
            for(int i = 0; i < NUM_FRAMES; ++i) {
                printPointCloudInfo(*tc.frames[i], i);
                // Show depth passthrough info for the custom transform case
                if(tc.title.find("Custom") != std::string::npos) {
                    std::cout << "  Depth frame  : " << depthFrames[i]->getWidth() << " × " << depthFrames[i]->getHeight() << "\n";
                }
            }
        }

    } catch(const std::exception& e) {
        std::cerr << "\nError: " << e.what() << std::endl;
        return 1;
    }

    std::cout << "\nAll demos completed.\n";
    return 0;
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
