PointCloud Control
This example demonstrates how to use the PointCloudConfig
message to dynamically update the transformation matrix of a point cloud and visualize the transformed point cloud using Open3D. The transformation matrix is updated to make it look like the point cloud is rotating about the Z-axis. This is achieved by first moving the point cloud along the Y axis:
Python
# Move the point cloud along Y axis
translate_y_matrix = np.array([
    [1, 0, 0, 0],
    [0, 1, 0, 500],
    [0, 0, 1, 0],
    [0, 0, 0, 1]
])
Python
# Rotate the point cloud about Z axis
rotate_z_matrix = np.array([
    [np.cos(angle), -np.sin(angle), 0, 0],
    [np.sin(angle), np.cos(angle), 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1]
])
Demo
Setup
Ensure DepthAI and Open3D are installed in your Python environment:
Command Line
python3 -m pip install depthai open3d
Source code
The example initializes the DepthAI pipeline with color and mono cameras and a stereo depth node to generate depth information. It then creates a point cloud node, dynamically updates its transformation matrix based on a rotation value, and visualizes this transformed point cloud using Open3D. Each point's color corresponds to the color image captured by the RGB camera.
Python
Python
PythonGitHub
1#!/usr/bin/env python3
2
3import depthai as dai
4import numpy as np
5import cv2
6import sys
7
8try:
9 import open3d as o3d
10except ImportError:
11 sys.exit(f"Critical dependency missing: Open3D. Please install it using the command: '{sys.executable} -m pip install open3d' and then rerun the script.")
12
13
14# Continue with the DepthAI and Open3D setup as before
15pipeline = dai.Pipeline()
16camRgb = pipeline.create(dai.node.ColorCamera)
17monoLeft = pipeline.create(dai.node.MonoCamera)
18monoRight = pipeline.create(dai.node.MonoCamera)
19depth = pipeline.create(dai.node.StereoDepth)
20pointcloud: dai.node.PointCloud = pipeline.create(dai.node.PointCloud)
21sync = pipeline.create(dai.node.Sync)
22xOut = pipeline.create(dai.node.XLinkOut)
23xOut.input.setBlocking(False)
24
25
26camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
27camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
28camRgb.setIspScale(1,3)
29
30monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
31monoLeft.setCamera("left")
32
33monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
34monoRight.setCamera("right")
35
36
37depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
38depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
39depth.setLeftRightCheck(True)
40depth.setExtendedDisparity(False)
41depth.setSubpixel(True)
42depth.setDepthAlign(dai.CameraBoardSocket.CAM_A)
43config = depth.initialConfig.get()
44config.postProcessing.thresholdFilter.minRange = 200
45config.postProcessing.thresholdFilter.maxRange = 1000
46depth.initialConfig.set(config)
47
48monoLeft.out.link(depth.left)
49monoRight.out.link(depth.right)
50depth.depth.link(pointcloud.inputDepth)
51camRgb.isp.link(sync.inputs["rgb"])
52pointcloud.outputPointCloud.link(sync.inputs["pcl"])
53pointcloud.initialConfig.setSparse(False)
54sync.out.link(xOut.input)
55xOut.setStreamName("out")
56
57inConfig = pipeline.create(dai.node.XLinkIn)
58inConfig.setStreamName("config")
59inConfig.out.link(pointcloud.inputConfig)
60
61with dai.Device(pipeline) as device:
62
63 isRunning = True
64 def key_callback(vis, action, mods):
65 global isRunning
66 if action == 0:
67 isRunning = False
68
69 q = device.getOutputQueue(name="out", maxSize=4, blocking=False)
70 pclConfIn = device.getInputQueue(name="config", maxSize=4, blocking=False)
71
72 vis = o3d.visualization.Visualizer()
73 vis.create_window()
74 pcd = o3d.geometry.PointCloud()
75 coordinateFrame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1000, origin=[0,0,0])
76 vis.add_geometry(coordinateFrame)
77 vis.add_geometry(pcd)
78
79 first = True
80 rot = 0
81 while device.isPipelineRunning():
82
83 inMessage = q.get()
84 inColor = inMessage["rgb"]
85 inPointCloud = inMessage["pcl"]
86 cvColorFrame = inColor.getCvFrame()
87
88 cvRGBFrame = cv2.cvtColor(cvColorFrame, cv2.COLOR_BGR2RGB)
89
90 if inPointCloud:
91 points = inPointCloud.getPoints().astype(np.float64)
92 pcd.points = o3d.utility.Vector3dVector(points)
93 colors = (cvRGBFrame.reshape(-1, 3) / 255.0).astype(np.float64)
94 pcd.colors = o3d.utility.Vector3dVector(colors)
95
96 vis.update_geometry(pcd)
97 vis.poll_events()
98 vis.update_renderer()
99
100 # Create a transformation matrix that rotates around all 3 axes at the same time
101 rotate_z_matrix = np.array([[np.cos(rot), -np.sin(rot), 0, 0],
102 [np.sin(rot), np.cos(rot), 0, 0],
103 [0, 0, 1, 0],
104 [0, 0, 0, 1]])
105
106 translate_y_matrix = np.array([[1, 0, 0, 0],
107 [0, 1, 0, 500],
108 [0, 0, 1, 0],
109 [0, 0, 0, 1]])
110
111
112 # Combine the 3 transformation matrices into one
113 transformation_matrix = rotate_z_matrix @ translate_y_matrix
114
115 rot += 0.05
116
117 pclConfig = dai.PointCloudConfig()
118 pclConfig.setTransformationMatrix(transformation_matrix)
119 pclConfig.setSparse(False)
120 pclConfIn.send(pclConfig)
121
122
123 vis.destroy_window()
124 cv2.destroyAllWindows()
Pipeline
Need assistance?
Head over to Discussion Forum for technical support or any other questions you might have.