DepthAI V3 Porting Guide
What's new in the v3 API
- No more explicit XLink nodes - the XLink "bridges" are created automatically.
- Host nodes - nodes that run on the host machine now work cleanly with device-side nodes.
- Custom host nodes - users can create custom nodes that run on the host machine.
  - Both `ThreadedHostNode` and `HostNode` are supported. `ThreadedHostNode` works similarly to the `Script` node: the user specifies a `run` function that executes in a separate thread. `HostNode` exposes an input map `inputs` whose entries are implicitly synced. See the sketch after this list.
  - Available in both Python and C++.
- Record-and-replay nodes.
- `Pipeline` now has a live device that can be queried during pipeline creation.
- Support for the new Model Zoo.
- `ImageManip` has a refreshed API with better-defined behaviour.
- `ColorCamera` and `MonoCamera` are deprecated in favour of the new `Camera` node.
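To illustrate the custom host nodes, here is a minimal `ThreadedHostNode` sketch. Treat the details as assumptions: the helper names (`createInput`, `isRunning`) and creating the node via `pipeline.create` follow the v3 examples, so verify them against the current API reference.

```python
import time

import depthai as dai

class FramePrinter(dai.node.ThreadedHostNode):
    """Hypothetical host node that logs every frame it receives."""

    def __init__(self):
        super().__init__()
        self.input = self.createInput()  # host-side input, linkable to any output

    def run(self):  # executes in its own thread, much like a Script node's script
        while self.isRunning():
            frame = self.input.get()  # blocking read
            print("Got frame", frame.getSequenceNum())

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.Camera).build()
printer = pipeline.create(FramePrinter)
cam.requestOutput((640, 400)).link(printer.input)

pipeline.start()
while pipeline.isRunning():
    time.sleep(0.1)  # FramePrinter does its work on its own thread
```

And because the pipeline now holds a live device, you can query it while building the pipeline; assuming the `getDefaultDevice()` accessor:

```python
device = pipeline.getDefaultDevice()
print(device.getConnectedCameras())  # e.g. choose sockets based on what is present
```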
Minimal changes required
- Remove the explicit creation of `dai.Device` (unless you intentionally pass a live device handle via the pipeline constructor, a rare edge case).
- Remove explicit XLink nodes.
- Replace `dai.Device(pipeline)` with `pipeline.start()`.
- Replace any `.getOutputQueue()` calls with `output.createOutputQueue()` (see the sketch below).
- Replace any `.getInputQueue()` calls with `input.createInputQueue()`.
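A minimal sketch of the queue changes, assuming the `maxSize` and `blocking` keyword arguments carry over from the v2 queue options:

```python
import depthai as dai

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.Camera).build()
manip = pipeline.create(dai.node.ImageManip)
cam.requestOutput((640, 400)).link(manip.inputImage)

# v2: q = device.getOutputQueue(name="out", maxSize=4, blocking=False)
outQueue = manip.out.createOutputQueue(maxSize=4, blocking=False)

# v2: cfgQ = device.getInputQueue("config")
cfgQueue = manip.inputConfig.createInputQueue()

pipeline.start()
```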
Quick port: simple RGB stream example
The original code is marked with `# ORIG` and the new code with `# NEW`:

```python
#!/usr/bin/env python3

import cv2
import depthai as dai

# Create pipeline
pipeline = dai.Pipeline()

# Define source and output
camRgb = pipeline.create(dai.node.ColorCamera)

# ORIG - explicit XLink removed in v3
# xoutVideo = pipeline.create(dai.node.XLinkOut)
# xoutVideo.setStreamName("video")

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setVideoSize(1920, 1080)

# Linking
# ORIG
# camRgb.video.link(xoutVideo.input)
# NEW - output queue straight from the node
videoQueue = camRgb.video.createOutputQueue()

# ORIG - entire `with dai.Device` block removed
# with dai.Device(pipeline) as device:
#     video = device.getOutputQueue(name="video", maxSize=1, blocking=False)
#     while True:
# NEW - start the pipeline
pipeline.start()
while pipeline.isRunning():
    videoIn = videoQueue.get()  # blocking
    cv2.imshow("video", videoIn.getCvFrame())
    if cv2.waitKey(1) == ord('q'):
        break
```

`ColorCamera`/`MonoCamera` nodes are deprecated on RVC4; see the next section for using `Camera` instead.

Porting ColorCamera / MonoCamera usage to Camera
The `Camera` node can expose as many outputs as you request. Code like this:

```python
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(300, 300)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
outputQueue = camRgb.preview.createOutputQueue()
```

becomes:

```python
camRgb = pipeline.create(dai.node.Camera).build()  # don't forget .build()
cameraOutput = camRgb.requestOutput((300, 300), type=dai.ImgFrame.Type.RGB888p)  # replaces .preview
outputQueue = cameraOutput.createOutputQueue()
```

To obtain additional outputs, call `requestOutput` again. For full-resolution use cases that previously used `.isp`, call `requestFullResolutionOutput()` instead.
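For example, a minimal sketch of the `.isp` replacement (queue creation shown for completeness):

```python
# v2: camRgb.isp was linked to an XLinkOut and read via device.getOutputQueue(...)
full = camRgb.requestFullResolutionOutput()  # replaces the .isp output
fullQueue = full.createOutputQueue()
```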
For former `MonoCamera` pipelines, replace the `.out` output with `requestOutput`, e.g.

```python
mono = pipeline.create(dai.node.Camera).build()
monoOut = mono.requestOutput((1280, 720), type=dai.ImgFrame.Type.GRAY8)
```

Porting the old ImageManip to the new API
v2 example:

```python
#!/usr/bin/env python3

import cv2
import depthai as dai

# Create pipeline
pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(1000, 500)
camRgb.setInterleaved(False)
maxFrameSize = camRgb.getPreviewHeight() * camRgb.getPreviewWidth() * 3

# In this example we use 2 imageManips for splitting the original 1000x500
# preview frame into 2 500x500 frames
manip1 = pipeline.create(dai.node.ImageManip)
manip1.initialConfig.setCropRect(0, 0, 0.5, 1)
manip1.setMaxOutputFrameSize(maxFrameSize)
camRgb.preview.link(manip1.inputImage)

manip2 = pipeline.create(dai.node.ImageManip)
manip2.initialConfig.setCropRect(0.5, 0, 1, 1)
manip2.setMaxOutputFrameSize(maxFrameSize)
camRgb.preview.link(manip2.inputImage)

xout1 = pipeline.create(dai.node.XLinkOut)
xout1.setStreamName('out1')
manip1.out.link(xout1.input)

xout2 = pipeline.create(dai.node.XLinkOut)
xout2.setStreamName('out2')
manip2.out.link(xout2.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the rgb frames from the outputs defined above
    q1 = device.getOutputQueue(name="out1", maxSize=4, blocking=False)
    q2 = device.getOutputQueue(name="out2", maxSize=4, blocking=False)

    while True:
        if q1.has():
            cv2.imshow("Tile 1", q1.get().getCvFrame())

        if q2.has():
            cv2.imshow("Tile 2", q2.get().getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
```

v3 equivalent:
```python
#!/usr/bin/env python3

import cv2
import depthai as dai

# Create pipeline
pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.Camera).build()
preview = camRgb.requestOutput((1000, 500), type=dai.ImgFrame.Type.RGB888p)

# In this example we use 2 imageManips for splitting the original 1000x500
# preview frame into 2 500x500 frames
manip1 = pipeline.create(dai.node.ImageManip)
manip1.initialConfig.addCrop(0, 0, 500, 500)
preview.link(manip1.inputImage)

manip2 = pipeline.create(dai.node.ImageManip)
manip2.initialConfig.addCrop(500, 0, 500, 500)
preview.link(manip2.inputImage)

q1 = manip1.out.createOutputQueue()
q2 = manip2.out.createOutputQueue()

pipeline.start()
with pipeline:
    while pipeline.isRunning():
        if q1.has():
            cv2.imshow("Tile 1", q1.get().getCvFrame())

        if q2.has():
            cv2.imshow("Tile 2", q2.get().getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
```
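The refreshed `ImageManip` config applies operations in the order you add them. As a rough sketch of going beyond a plain crop (`setOutputSize` is an assumption based on the refreshed config API; verify the name against the reference):

```python
manip = pipeline.create(dai.node.ImageManip)
manip.initialConfig.addCrop(0, 0, 500, 500)   # same crop as tile 1 above
manip.initialConfig.setOutputSize(300, 300)   # assumed: scale the cropped tile down
preview.link(manip.inputImage)
scaledQueue = manip.out.createOutputQueue()
```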