Depth and Video Sync

This example demonstrates the use of the DepthAI Sync node to synchronize outputs from the StereoDepth and ColorCamera nodes. It shows how to process and display disparity maps from the stereo cameras and video frames from the color camera in real time.
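
Stripped to its essentials, the pattern is: create a Sync node, pick a timing tolerance, and link each stream to a named input; the same names come back when reading the resulting message group. A minimal sketch of that pattern (the input names are arbitrary labels chosen for this example, not fixed API names):

import depthai as dai
from datetime import timedelta

pipeline = dai.Pipeline()
sync = pipeline.create(dai.node.Sync)
# Frames whose timestamps differ by less than this tolerance are grouped together
sync.setSyncThreshold(timedelta(milliseconds=50))
# Any node output can feed a named Sync input, e.g.:
# stereo.disparity.link(sync.inputs["disparity"])
# color.video.link(sync.inputs["video"])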

Similar samples:

Demo

Depth and Video Sync Demo

Setup

Please run the install script to download all required dependencies. Please note that this script must be run from within the cloned git repository, so you have to download the depthai-python repository first and then run the script:

git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py

For additional information, please follow the installation guide.

Source code

Also available on GitHub

import depthai as dai
import numpy as np
import cv2
from datetime import timedelta

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
color = pipeline.create(dai.node.ColorCamera)
stereo = pipeline.create(dai.node.StereoDepth)
sync = pipeline.create(dai.node.Sync)

xoutGrp = pipeline.create(dai.node.XLinkOut)

# Stream name used to retrieve the output queue on the host
xoutGrp.setStreamName("xout")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")

stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_ACCURACY)

color.setCamera("color")

# Group only messages whose timestamps differ by less than 50 ms
sync.setSyncThreshold(timedelta(milliseconds=50))

# Linking
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)

stereo.disparity.link(sync.inputs["disparity"])
color.video.link(sync.inputs["video"])

sync.out.link(xoutGrp.input)

# Map disparity [0..maxDisparity] to [0..255] for visualization
disparityMultiplier = 255.0 / stereo.initialConfig.getMaxDisparity()

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    queue = device.getOutputQueue("xout", 10, False)
    while True:
        msgGrp = queue.get()  # blocks until a synchronized MessageGroup arrives
        for name, msg in msgGrp:
            frame = msg.getCvFrame()
            if name == "disparity":
                # Colorize the disparity map for display
                frame = (frame * disparityMultiplier).astype(np.uint8)
                frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
            cv2.imshow(name, frame)
        if cv2.waitKey(1) == ord("q"):
            break

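The Sync node is not limited to two streams. As an illustrative sketch (the "left" input name is an assumption for this illustration, not part of the example above), the left mono feed could be added to the same group; because the display loop iterates over every named message in the group, no other change is needed:

# Illustrative extension: feed the left mono frames into the same Sync node
monoLeft.out.link(sync.inputs["left"])
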
Also available on GitHub

#include <iostream>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

int main() {
    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
    auto monoRight = pipeline.create<dai::node::MonoCamera>();
    auto color = pipeline.create<dai::node::ColorCamera>();
    auto stereo = pipeline.create<dai::node::StereoDepth>();
    auto sync = pipeline.create<dai::node::Sync>();

    auto xoutGrp = pipeline.create<dai::node::XLinkOut>();

    // XLinkOut
    xoutGrp->setStreamName("xout");

    // Properties
    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    monoLeft->setCamera("left");
    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    monoRight->setCamera("right");

    stereo->setDefaultProfilePreset(dai::node::StereoDepth::PresetMode::HIGH_ACCURACY);

    color->setCamera("color");

    // Group only messages whose timestamps differ by less than 100 ms
    sync->setSyncThreshold(std::chrono::milliseconds(100));

    // Linking
    monoLeft->out.link(stereo->left);
    monoRight->out.link(stereo->right);

    stereo->disparity.link(sync->inputs["disparity"]);
    color->video.link(sync->inputs["video"]);

    sync->out.link(xoutGrp->input);

    // Connect to device and start pipeline
    dai::Device device(pipeline);

    auto queue = device.getOutputQueue("xout", 10, true);

    float disparityMultiplier = 255 / stereo->initialConfig.getMaxDisparity();

    while(true) {
        auto msgGrp = queue->get<dai::MessageGroup>();
        for(auto& frm : *msgGrp) {
            auto imgFrm = std::dynamic_pointer_cast<dai::ImgFrame>(frm.second);
            cv::Mat img = imgFrm->getCvFrame();
            if(frm.first == "disparity") {
                img.convertTo(img, CV_8UC1, disparityMultiplier);  // Extend disparity range
                cv::applyColorMap(img, img, cv::COLORMAP_JET);
            }
            cv::imshow(frm.first, img);
        }

        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') {
            return 0;
        }
    }
    return 0;
}

How it Works

  1. Initialize MonoCamera nodes for left and right cameras.

  2. Set up a ColorCamera node.

  3. Create a StereoDepth node for depth perception.

  4. Configure the Sync node to synchronize disparity maps from the StereoDepth node with video frames from the ColorCamera node.

  5. Display the synchronized frames using OpenCV. Frames are grouped only when their timestamps fall within the configured sync threshold (50 milliseconds in the Python example, 100 milliseconds in the C++ one); the sketch below shows how to inspect the actual alignment.
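
To see how tightly a group is actually aligned, compare the device timestamps of its messages. A minimal sketch, reusing the queue from the Python example above (the print formatting is illustrative):

msgGrp = queue.get()
# Device-side capture timestamps of every message in the group
timestamps = [msg.getTimestampDevice() for _, msg in msgGrp]
spread = max(timestamps) - min(timestamps)
print(f"Timestamp spread within group: {spread.total_seconds() * 1000:.2f} ms")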

Got questions?

Head over to the Discussion Forum for technical support or any other questions you might have.