DepthAI
  • DepthAI Components
    • AprilTags
    • Benchmark
    • Camera
    • Calibration
    • DetectionNetwork
    • Events
    • FeatureTracker
    • Gate
    • HostNodes
    • ImageAlign
    • ImageManip
    • IMU
    • Misc
    • Model Zoo
    • NeuralDepth
    • NeuralNetwork
    • ObjectTracker
    • RecordReplay
    • RGBD
    • Script
    • SpatialDetectionNetwork
    • SpatialLocationCalculator
    • StereoDepth
    • Sync
    • VideoEncoder
    • Visualizer
    • Warp
    • RVC2-specific
      • EdgeDetector
      • NNArchive
      • SystemLogger
      • Thermal
      • Tof
      • VSLAM
  • Advanced Tutorials
  • API Reference
  • Tools
Software Stack

ON THIS PAGE

  • Pipeline
  • Source code

ToF

Supported on: RVC2
This RVC2 example shows a minimal ToF pipeline and visualizes depth with a colormap.
Unlike classic color + mono stereo-pair examples, ToF depth is produced through dai.node.ToF.
For architecture and settings, see the ToF node docs.
For a full runtime-tuning and pointcloud workflow, see ToF pointcloud + runtime filter controls (oak-examples).
This example requires the DepthAI v3 API, see installation instructions.

Pipeline

Source code

Python

Python
GitHub
1#!/usr/bin/env python3
2
3import cv2
4import depthai as dai
5import numpy as np
6
7
def colorizeDepth(frameDepth: np.ndarray) -> np.ndarray:
    """Colorize a single-channel depth frame for visualization.

    Depth is mapped on a log scale between its 3rd and 95th percentiles and
    rendered with COLORMAP_JET. Invalid pixels (depth == 0) are drawn black.

    Args:
        frameDepth: 2-D array of depth values; 0 marks invalid pixels.

    Returns:
        An (H, W, 3) uint8 BGR image.
    """
    invalidMask = frameDepth == 0  # zero depth is invalid
    validDepth = frameDepth[~invalidMask]
    if validDepth.size == 0:
        # No valid depth at all (e.g. an empty first frame) -> all black.
        return np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8)

    minDepth = np.percentile(validDepth, 3)
    maxDepth = np.percentile(validDepth, 95)
    logMinDepth = np.log(minDepth)
    logMaxDepth = np.log(maxDepth)

    # Pre-fill the output with logMinDepth so that entries skipped by the
    # `where=` mask are well-defined. (np.log(..., where=...) without `out=`
    # leaves *uninitialized* memory at masked positions — not necessarily NaN,
    # so a later nan_to_num cannot reliably clean it up.)
    logDepth = np.full(frameDepth.shape, logMinDepth, dtype=np.float64)
    np.log(frameDepth, out=logDepth, where=~invalidMask)
    logDepth = np.clip(logDepth, logMinDepth, logMaxDepth)

    if logMaxDepth > logMinDepth:
        # Linearly rescale the clipped log depth to the 0-255 range.
        depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255))
    else:
        # Degenerate frame: every valid pixel has the same depth.
        depthFrameColor = np.zeros_like(logDepth)

    depthFrameColor = depthFrameColor.astype(np.uint8)
    depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET)
    # Set invalid depth pixels to black
    depthFrameColor[invalidMask] = 0
    return depthFrameColor
37
38
def main():
    """Run a minimal ToF pipeline and display raw + filtered depth."""
    pipeline = dai.Pipeline()

    # Create the ToF node (auto-selected socket, mid-range filtering preset)
    tof = pipeline.create(dai.node.ToF).build(
        dai.CameraBoardSocket.AUTO, dai.ImageFiltersPresetMode.TOF_MID_RANGE
    )

    # Host-side queues for both ToF outputs
    filteredQueue = tof.depth.createOutputQueue()
    rawQueue = tof.rawDepth.createOutputQueue()

    with pipeline as p:
        p.start()
        while p.isRunning():
            # Raw depth: unfiltered measurements straight from the ToF sensor
            rawFrame: dai.ImgFrame = rawQueue.get()
            cv2.imshow("depthRaw", colorizeDepth(rawFrame.getFrame()))

            # Filtered depth: the raw depth after the preset's filter chain
            filteredFrame: dai.ImgFrame = filteredQueue.get()
            cv2.imshow("depth", colorizeDepth(filteredFrame.getFrame()))

            if cv2.waitKey(1) == ord("q"):
                break


if __name__ == "__main__":
    main()

C++

1#include <iostream>
2#include <opencv2/opencv.hpp>
3#include <xtensor/containers/xadapt.hpp>
4#include <xtensor/containers/xarray.hpp>
5
6#include "depthai/depthai.hpp"
7
8cv::Mat colorizeDepth(const cv::Mat& frameDepth) {
9    // -----------------------------------------------------------------------
10    // 1.  Basic checks & convert to CV_32F
11    // -----------------------------------------------------------------------
12    if(frameDepth.empty() || frameDepth.channels() != 1) return cv::Mat::zeros(frameDepth.size(), CV_8UC3);
13
14    cv::Mat depth32f;
15    frameDepth.convertTo(depth32f, CV_32F);  // safe for any input type
16
17    // -----------------------------------------------------------------------
18    // 2.  Build mask of valid (non-zero) pixels
19    // -----------------------------------------------------------------------
20    const cv::Mat nonZeroMask = depth32f != 0.0f;
21    const int nz = cv::countNonZero(nonZeroMask);
22    if(nz == 0) return cv::Mat::zeros(frameDepth.size(), CV_8UC3);
23
24    // -----------------------------------------------------------------------
25    // 3.  3 % / 95 % percentiles (identical to Python version)
26    // -----------------------------------------------------------------------
27    std::vector<float> values;
28    values.reserve(nz);
29    for(int r = 0; r < depth32f.rows; ++r) {
30        const float* d = depth32f.ptr<float>(r);
31        const uchar* m = nonZeroMask.ptr<uchar>(r);
32        for(int c = 0; c < depth32f.cols; ++c)
33            if(m[c]) values.push_back(d[c]);
34    }
35
36    std::sort(values.begin(), values.end());
37    auto pct = [&](double p) {
38        size_t idx = static_cast<size_t>(std::round((p / 100.0) * (values.size() - 1)));
39        return values[idx];
40    };
41
42    const float minDepth = pct(3.0);
43    const float maxDepth = pct(95.0);
44
45    // -----------------------------------------------------------------------
46    // 4.  Logarithm (zeros replaced by minDepth to avoid -inf)
47    // -----------------------------------------------------------------------
48    cv::Mat logDepth;
49    depth32f.copyTo(logDepth);
50    logDepth.setTo(minDepth, ~nonZeroMask);  // overwrite zeros
51    cv::log(logDepth, logDepth);
52
53    const float logMinDepth = std::log(minDepth);
54    const float logMaxDepth = std::log(maxDepth);
55
56    // -----------------------------------------------------------------------
57    // 5.  Clip & linearly scale to [0,255]  (same as np.interp)
58    // -----------------------------------------------------------------------
59    cv::min(logDepth, logMaxDepth, logDepth);
60    cv::max(logDepth, logMinDepth, logDepth);
61    logDepth = (logDepth - logMinDepth) * (255.0f / (logMaxDepth - logMinDepth));
62
63    cv::Mat depth8U;
64    logDepth.convertTo(depth8U, CV_8U);
65
66    // -----------------------------------------------------------------------
67    // 6.  Colour map + set invalid pixels to black
68    // -----------------------------------------------------------------------
69    cv::Mat depthFrameColor;
70    cv::applyColorMap(depth8U, depthFrameColor, cv::COLORMAP_JET);
71    depthFrameColor.setTo(cv::Scalar::all(0), ~nonZeroMask);
72
73    return depthFrameColor;
74}
75
76int main() {
77    dai::Pipeline pipeline;
78
79    // ToF node
80    dai::CameraBoardSocket socket = dai::CameraBoardSocket::AUTO;
81    dai::ImageFiltersPresetMode presetMode = dai::ImageFiltersPresetMode::TOF_MID_RANGE;
82    std::shared_ptr<dai::node::ToF> tof = pipeline.create<dai::node::ToF>()->build(socket, presetMode);
83
84    // Output queues
85    std::shared_ptr<dai::MessageQueue> depthQueue = tof->depth.createOutputQueue();
86    std::shared_ptr<dai::MessageQueue> depthRawQueue = tof->rawDepth.createOutputQueue();
87
88    pipeline.start();
89    while(pipeline.isRunning()) {
90        // Visualize raw depth (unfiltered depth directly from the ToF sensor)
91        std::shared_ptr<dai::ImgFrame> depthRaw = depthRawQueue->get<dai::ImgFrame>();
92        cv::Mat depthRawImage = colorizeDepth(depthRaw->getCvFrame());
93        cv::imshow("depthRaw", depthRawImage);
94
95        // Visualize depth (which is filtered depthRaw)
96        std::shared_ptr<dai::ImgFrame> depth = depthQueue->get<dai::ImgFrame>();
97        cv::Mat depthImage = colorizeDepth(depth->getCvFrame());
98        cv::imshow("depth", depthImage);
99
100        if(cv::waitKey(1) == 'q') break;
101    }
102
103    return 0;
104}

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.