ToF Depth

This example showcases how to use the ToF sensor. The ToF node converts the raw data coming from the ToF sensor into a depth map.
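
The basic structure is: a Camera node produces raw ToF data, the ToF node decodes it into depth, and an XLinkOut node sends the depth frames to the host. Below is a stripped-down sketch of just that wiring (it assumes the ToF sensor sits on the CAM_A socket, as in the full example further down):
Python
import depthai as dai

# Minimal wiring: raw ToF camera -> ToF decoder -> depth frames out over XLink
pipeline = dai.Pipeline()

cam_tof = pipeline.create(dai.node.Camera)
cam_tof.setBoardSocket(dai.CameraBoardSocket.CAM_A)  # socket hosting the ToF sensor (assumed CAM_A here)

tof = pipeline.create(dai.node.ToF)
cam_tof.raw.link(tof.input)        # raw ToF data into the ToF node

xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("depth")
tof.depth.link(xout.input)         # decoded depth map to the host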

Demo

This demo was recorded using the OAK-D SR PoE, which is why the CAM_A port is selected for the ToF sensor.
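
If you are running a different OAK model, the ToF sensor may be attached to another socket. A quick way to check, reusing the same getConnectedCameraFeatures() call as the full example below (the exact fields available may vary between depthai versions), is:
Python
import depthai as dai

# Print each connected sensor together with the board socket it sits on
with dai.Device() as device:
    for cam in device.getConnectedCameraFeatures():
        print(cam.socket, cam.sensorName)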

Setup

Please run the install script to download all required dependencies. Please note that this script must be run from a git context, so you have to clone the depthai-python repository first and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.

Source code

Python
#!/usr/bin/env python3

import time
import cv2
import depthai as dai
import numpy as np

print(dai.__version__)

# JET colormap lookup table for visualization; value 0 (invalid depth) is mapped to black
cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET)
cvColorMap[0] = [0, 0, 0]

def create_pipeline():
    pipeline = dai.Pipeline()

    tof = pipeline.create(dai.node.ToF)

    # Configure the ToF node
    tofConfig = tof.initialConfig.get()

    # Optional. Best accuracy, but adds motion blur.
    # see ToF node docs on how to reduce/eliminate motion blur.
    tofConfig.enableOpticalCorrection = True
    tofConfig.enablePhaseShuffleTemporalFilter = True
    tofConfig.phaseUnwrappingLevel = 4
    tofConfig.phaseUnwrapErrorThreshold = 300

    tofConfig.enableTemperatureCorrection = False # Not yet supported

    # XLinkIn stream for updating the ToF configuration at runtime
    xinTofConfig = pipeline.create(dai.node.XLinkIn)
    xinTofConfig.setStreamName("tofConfig")
    xinTofConfig.out.link(tof.inputConfig)

    tof.initialConfig.set(tofConfig)

    cam_tof = pipeline.create(dai.node.Camera)
    cam_tof.setFps(60) # ToF node will produce depth frames at /2 of this rate
    cam_tof.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    cam_tof.raw.link(tof.input)

    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName("depth")
    tof.depth.link(xout.input)

    tofConfig = tof.initialConfig.get()

    return pipeline, tofConfig


if __name__ == '__main__':
    pipeline, tofConfig = create_pipeline()

    with dai.Device(pipeline) as device:
        print('Connected cameras:', device.getConnectedCameraFeatures())
        qDepth = device.getOutputQueue(name="depth")

        tofConfigInQueue = device.getInputQueue("tofConfig")

        counter = 0
        while True:
            start = time.time()
            # Runtime controls: f/o/w/t toggle FPPN/optical/wiggle/temperature correction,
            # 0-5 set the phase unwrapping level, m cycles the median filter, q quits
            key = cv2.waitKey(1)
            if key == ord('f'):
                tofConfig.enableFPPNCorrection = not tofConfig.enableFPPNCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('o'):
                tofConfig.enableOpticalCorrection = not tofConfig.enableOpticalCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('w'):
                tofConfig.enableWiggleCorrection = not tofConfig.enableWiggleCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('t'):
                tofConfig.enableTemperatureCorrection = not tofConfig.enableTemperatureCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('q'):
                break
            elif key == ord('0'):
                tofConfig.enablePhaseUnwrapping = False
                tofConfig.phaseUnwrappingLevel = 0
                tofConfigInQueue.send(tofConfig)
            elif key == ord('1'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 1
                tofConfigInQueue.send(tofConfig)
            elif key == ord('2'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 2
                tofConfigInQueue.send(tofConfig)
            elif key == ord('3'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 3
                tofConfigInQueue.send(tofConfig)
            elif key == ord('4'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 4
                tofConfigInQueue.send(tofConfig)
            elif key == ord('5'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 5
                tofConfigInQueue.send(tofConfig)
            elif key == ord('m'):
                medianSettings = [dai.MedianFilter.MEDIAN_OFF, dai.MedianFilter.KERNEL_3x3, dai.MedianFilter.KERNEL_5x5,
                                  dai.MedianFilter.KERNEL_7x7]
                currentMedian = tofConfig.median
                nextMedian = medianSettings[(medianSettings.index(currentMedian) + 1) % len(medianSettings)]
                print(f"Changing median to {nextMedian.name} from {currentMedian.name}")
                tofConfig.median = nextMedian
                tofConfigInQueue.send(tofConfig)

            imgFrame = qDepth.get()  # blocking call, will wait until new data has arrived
            depth_map = imgFrame.getFrame()
            max_depth = (tofConfig.phaseUnwrappingLevel + 1) * 1500 # 100MHz modulation freq.
            depth_colorized = np.interp(depth_map, (0, max_depth), (0, 255)).astype(np.uint8)
            depth_colorized = cv2.applyColorMap(depth_colorized, cvColorMap)

            cv2.imshow("Colorized depth", depth_colorized)
            counter += 1

    device.close()
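
The max_depth used for colorization reflects the ToF working range: at a 100 MHz modulation frequency the unambiguous range of a single phase measurement is c / (2·f) ≈ 1.5 m, and each additional phase-unwrapping level extends the range by roughly another 1.5 m, which is where (phaseUnwrappingLevel + 1) * 1500 mm comes from. A small sketch of that arithmetic:
Python
# Unambiguous ToF range at 100 MHz modulation, extended by phase unwrapping
c = 299_792_458          # speed of light, m/s
f_mod = 100e6            # modulation frequency, Hz
print(c / (2 * f_mod))   # ~1.5 m unambiguous range per phase measurement

for level in range(6):
    max_depth_mm = (level + 1) * 1500   # matches the example's max_depth
    print(f"phaseUnwrappingLevel={level}: max depth ~ {max_depth_mm} mm")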

Pipeline

The pipeline built by this example: a Camera node (CAM_A, raw output) feeds the ToF node, whose depth output goes to an XLinkOut stream named "depth", while an XLinkIn stream named "tofConfig" feeds runtime configuration updates into the ToF node.
