DepthAI
Software Stack

ON THIS PAGE

  • Pipeline
  • Source code

Camera multiple outputs

Supported on: RVC2, RVC4
Example showcases DepthAI v3's functionality to request an output stream directly from the Camera node, instead of having to create and configure multiple ImageManip nodes.
Python
# Request two differently-sized/typed streams from the SAME Camera node —
# no ImageManip nodes needed in DepthAI v3.
output1 = camera_node.requestOutput(
    size=(640, 480),
    type=dai.ImgFrame.Type.BGR888p,
    resize_mode=dai.ImgResizeMode.CROP,
    fps=15
)
# NOTE: both requests go through the same node (the original snippet used an
# undefined `cam` here).
output2 = camera_node.requestOutput(
    size=(1000, 500),
    type=dai.ImgFrame.Type.NV12,
    resize_mode=dai.ImgResizeMode.STRETCH,
    fps=20
)
Resize mode can be either CROP, STRETCH, or LETTERBOX, which come into play if there's a mismatch between the sensor aspect ratio (AR) and the requested aspect ratio. For more information (pros/cons of each) check the Input frame AR mismatch documentation. This example requires the DepthAI v3 API, see installation instructions.

Pipeline

Source code

Python

Python
GitHub
1#!/usr/bin/env python3
2
3import sys
4
5import cv2
6import depthai as dai
7import time
8
9# Create pipeline
10
11
def exit_usage() -> None:
    """Print the expected command-line format and terminate with exit code 1."""
    print(
        "WRONG USAGE! correct usage example:\n"
        "python camera_multiple_outputs.py 640 480 0 30 CAM_A 300 300 0 30 CAM_A 300 300 1 30 CAM_A \n"
        "where 0 is resize mode: 0 == CROP, 1 == STRETCH, 2 == LETTERBOX\n"
        "and 30 is FPS"
    )
    # sys.exit raises SystemExit; the bare builtin exit() is injected by the
    # `site` module and is not guaranteed to exist in every interpreter.
    sys.exit(1)
20
class FPSCounter:
    """Sliding-window FPS estimator over the last 100 frame timestamps."""

    def __init__(self):
        # time.time() stamps of recently received frames, newest last.
        self.frameTimes = []

    def tick(self):
        """Record the arrival time of a new frame, keeping at most 100 samples."""
        now = time.time()
        self.frameTimes.append(now)
        self.frameTimes = self.frameTimes[-100:]

    def getFps(self):
        """Return average FPS over the window, or 0 when it cannot be computed."""
        if len(self.frameTimes) <= 1:
            return 0
        span = self.frameTimes[-1] - self.frameTimes[0]
        # Two ticks can land within the clock's resolution; without this guard
        # the division below raises ZeroDivisionError.
        if span <= 0:
            return 0
        # Average FPS across the recorded window.
        return (len(self.frameTimes) - 1) / span
35
36
args = sys.argv[1:]
# Arguments come in groups of five: width height resizeMode fps socket.
if len(args) < 5 or len(args) % 5 != 0:
    exit_usage()

# Lookup tables translating CLI arguments into DepthAI enums.
_RESIZE_MODES = {
    0: dai.ImgResizeMode.CROP,
    1: dai.ImgResizeMode.STRETCH,
    2: dai.ImgResizeMode.LETTERBOX,
}
_SOCKETS = {
    "CAM_A": dai.CameraBoardSocket.CAM_A,
    "CAM_B": dai.CameraBoardSocket.CAM_B,
    "CAM_C": dai.CameraBoardSocket.CAM_C,
    "CAM_D": dai.CameraBoardSocket.CAM_D,
}

with dai.Pipeline() as pipeline:
    cams: dict = {}
    queues = []
    for base in range(0, len(args), 5):
        width_arg, height_arg, mode_arg, fps_arg, cam_arg = args[base:base + 5]
        cap = dai.ImgFrameCapability()
        cap.size.fixed((int(width_arg), int(height_arg)))
        mode_code = int(mode_arg)
        if mode_code not in _RESIZE_MODES:
            exit_usage()
        cap.resizeMode = _RESIZE_MODES[mode_code]
        cap.fps.fixed(float(fps_arg))
        if cam_arg not in _SOCKETS:
            exit_usage()
        socket = _SOCKETS[cam_arg]
        # One Camera node per socket, shared by all outputs on that socket.
        if socket not in cams:
            cams[socket] = pipeline.create(dai.node.Camera).build(socket)
        queues.append(cams[socket].requestOutput(cap, True).createOutputQueue())

    # Connect to device and start pipeline
    pipeline.start()
    fps_counters = [FPSCounter() for _ in queues]
    while pipeline.isRunning():
        for idx, queue in enumerate(queues):
            frame = queue.tryGet()
            if frame is None:
                continue
            fps_counters[idx].tick()
            assert isinstance(frame, dai.ImgFrame)
            print(
                f"frame {frame.getWidth()}x{frame.getHeight()} | {frame.getSequenceNum()}: exposure={frame.getExposureTime()}us, timestamp: {frame.getTimestampDevice()}"
            )
            # Get BGR frame from NV12 encoded video frame to show with opencv
            # Visualizing the frame on slower hosts might have overhead
            cv_frame = frame.getCvFrame()
            # Draw FPS
            cv2.putText(cv_frame, f"{fps_counters[idx].getFps():.2f} FPS", (2, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
            cv2.imshow("video " + str(idx), cv_frame)

        if cv2.waitKey(1) == ord("q"):
            break

C++

1#include <chrono>
2#include <iostream>
3#include <map>
4#include <memory>
5#include <opencv2/opencv.hpp>
6#include <vector>
7
8#include "depthai/depthai.hpp"
9
// Sliding-window FPS estimator over the timestamps of the last 100 frames.
class FPSCounter {
   public:
    // Record the arrival time of a new frame, keeping at most 100 samples.
    void tick() {
        auto now = std::chrono::steady_clock::now();
        frameTimes.push_back(now);
        if(frameTimes.size() > 100) {
            frameTimes.erase(frameTimes.begin());
        }
    }

    // Average FPS over the recorded window; 0 when it cannot be computed.
    float getFps() {
        if(frameTimes.size() <= 1) return 0.0f;
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(frameTimes.back() - frameTimes.front()).count();
        // Two frames can arrive within the same millisecond, making the
        // window span 0 — without this guard the division yields +inf.
        if(duration == 0) return 0.0f;
        return (frameTimes.size() - 1) * 1000.0f / duration;
    }

   private:
    std::vector<std::chrono::steady_clock::time_point> frameTimes;
};
29
// Print the expected command-line format and abort with exit code 1.
void exitUsage() {
    static const char* const kUsage =
        "WRONG USAGE! correct usage example:\n"
        "./camera_multiple_outputs 640 480 0 30 CAM_A 300 300 0 30 CAM_A 300 300 1 30 CAM_A\n"
        "where 0 is resize mode: 0 == CROP, 1 == STRETCH, 2 == LETTERBOX\n"
        "and 30 is FPS";
    std::cout << kUsage << std::endl;
    exit(1);
}
37
38int main(int argc, char* argv[]) {
39    if(argc < 6 || (argc - 1) % 5 != 0) {
40        exitUsage();
41    }
42
43    // Create device
44    std::shared_ptr<dai::Device> device = std::make_shared<dai::Device>();
45
46    // Create pipeline
47    dai::Pipeline pipeline(device);
48
49    // Parse arguments and create cameras
50    std::map<dai::CameraBoardSocket, std::shared_ptr<dai::node::Camera>> cams;
51    std::vector<std::shared_ptr<dai::MessageQueue>> queues;
52    std::vector<FPSCounter> fpsCounters;
53
54    for(int i = 1; i < argc; i += 5) {
55        int width = std::stoi(argv[i]);
56        int height = std::stoi(argv[i + 1]);
57        int resizeMode = std::stoi(argv[i + 2]);
58        float fps = std::stof(argv[i + 3]);
59        std::string camArg = argv[i + 4];
60
61        // Create capability
62        auto cap = std::make_shared<dai::ImgFrameCapability>();
63        cap->size.fixed(std::make_pair(width, height));
64
65        // Set resize mode
66        switch(resizeMode) {
67            case 0:
68                cap->resizeMode = dai::ImgResizeMode::CROP;
69                break;
70            case 1:
71                cap->resizeMode = dai::ImgResizeMode::STRETCH;
72                break;
73            case 2:
74                cap->resizeMode = dai::ImgResizeMode::LETTERBOX;
75                break;
76            default:
77                exitUsage();
78        }
79
80        cap->fps.fixed(fps);
81
82        // Parse camera socket
83        dai::CameraBoardSocket socket;
84        if(camArg == "CAM_A")
85            socket = dai::CameraBoardSocket::CAM_A;
86        else if(camArg == "CAM_B")
87            socket = dai::CameraBoardSocket::CAM_B;
88        else if(camArg == "CAM_C")
89            socket = dai::CameraBoardSocket::CAM_C;
90        else if(camArg == "CAM_D")
91            socket = dai::CameraBoardSocket::CAM_D;
92        else
93            exitUsage();
94
95        // Create camera if not exists
96        if(cams.find(socket) == cams.end()) {
97            cams[socket] = pipeline.create<dai::node::Camera>()->build(socket);
98        }
99
100        // Create output queue
101        queues.push_back(cams[socket]->requestOutput(*cap, true)->createOutputQueue());
102        fpsCounters.push_back(FPSCounter());
103    }
104
105    // Start pipeline
106    pipeline.start();
107
108    while(true) {
109        for(size_t i = 0; i < queues.size(); i++) {
110            auto videoIn = queues[i]->tryGet<dai::ImgFrame>();
111            if(videoIn != nullptr) {
112                fpsCounters[i].tick();
113                std::cout << "frame " << videoIn->getWidth() << "x" << videoIn->getHeight() << " | " << videoIn->getSequenceNum()
114                          << ": exposure=" << videoIn->getExposureTime().count()
115                          << "us, timestamp: " << videoIn->getTimestampDevice().time_since_epoch().count() << std::endl;
116
117                cv::Mat cvFrame = videoIn->getCvFrame();
118
119                // Draw FPS
120                cv::putText(cvFrame,
121                            std::to_string(fpsCounters[i].getFps()).substr(0, 4) + " FPS",
122                            cv::Point(2, 20),
123                            cv::FONT_HERSHEY_SIMPLEX,
124                            0.5,
125                            cv::Scalar(0, 255, 0));
126
127                cv::imshow("video " + std::to_string(i), cvFrame);
128            }
129        }
130
131        if(cv::waitKey(1) == 'q') {
132            break;
133        }
134    }
135
136    return 0;
137}

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.