# Camera multiple outputs

This example showcases DepthAI v3's ability to request output streams directly from the
[Camera](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/camera.md) node, instead of having to create and
configure multiple [ImageManip](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/image_manip.md) nodes.

```python
cam = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
output1 = cam.requestOutput(
    size=(640, 480),
    type=dai.ImgFrame.Type.BGR888p,
    resizeMode=dai.ImgResizeMode.CROP,
    fps=15
)
output2 = cam.requestOutput(
    size=(1000, 500),
    type=dai.ImgFrame.Type.NV12,
    resizeMode=dai.ImgResizeMode.STRETCH,
    fps=20
)
```
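
Each requested output behaves like a regular node output: it can be linked to downstream nodes or consumed on the host through an output queue. Below is a minimal consumption sketch, assuming the snippet above runs inside a `with dai.Pipeline() as pipeline:` block (the full example further down uses the same pattern):

```python
queue = output1.createOutputQueue()  # host-side queue fed by the requested output

pipeline.start()
while pipeline.isRunning():
    frame = queue.get()  # blocking; yields dai.ImgFrame messages
    print(f"{frame.getWidth()}x{frame.getHeight()}")
```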

The resize mode can be CROP, STRETCH, or LETTERBOX; it comes into play when there is a mismatch between the sensor aspect ratio
(AR) and the requested AR. For the pros and cons of each mode, see the [Input frame AR
mismatch](https://docs.luxonis.com/software-v3/depthai/tutorials/resolution-techniques.md) documentation.
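
To make the trade-offs concrete, the sketch below works through the plain arithmetic behind each mode for a 16:9 sensor and a square request. This is an illustration of the geometry only, not the DepthAI implementation:

```python
def resize_geometry(sensor, requested, mode):
    """Illustrative arithmetic only -- not the DepthAI implementation."""
    sw, sh = sensor
    rw, rh = requested
    if mode == "STRETCH":
        # Independent per-axis scaling: full FOV kept, pixels distorted
        return f"scale x{rw / sw:.3f} horizontally, x{rh / sh:.3f} vertically"
    if mode == "CROP":
        # Uniform scale that fills the target; the excess is cropped away
        s = max(rw / sw, rh / sh)
        return f"keep the central {rw / s:.0f}x{rh / s:.0f} of the sensor (FOV lost)"
    # LETTERBOX: uniform scale that fits inside the target; the rest is padded
    s = min(rw / sw, rh / sh)
    return f"image occupies {sw * s:.0f}x{sh * s:.0f}, the rest is black bars"

print(resize_geometry((1920, 1080), (300, 300), "CROP"))       # central 1080x1080 kept
print(resize_geometry((1920, 1080), (300, 300), "LETTERBOX"))  # 300x169 image plus bars
```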

This example requires the DepthAI v3 API; see the [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/camera_multiple_outputs.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 5, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "1", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "2", "node1OutputGroup": "dynamicOutputs", "node2Id": 1, "node2Input": "in", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[5, {"alias": "", "id": 5, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 8, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_0"}}], [3, {"alias": "", "id": 3, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 7, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_1"}}], [1, {"alias": "", "id": 1, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 6, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_2"}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 5, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["dynamicOutputs", "1"], {"blocking": false, "group": "dynamicOutputs", "id": 4, "name": "1", "queueSize": 8, "type": 0, "waitForMessage": false}], [["dynamicOutputs", "2"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "2", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 100663297, "aeRegion": {"height": 0, "priority": 3108157120, "width": 0, "x": 4163, "y": 1}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 22767, "y": 0}, "antiBandingMode": 66, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 8, "brightness": -86, "captureIntent": 185, "chromaDenoise": 0, "cmdMask": 0, "contrast": 43, "controlMode": 239, "effectMode": 88, "enableHdr": false, "expCompensation": -41, "expManual": {"exposureTimeUs": 22767, "frameDurationUs": 22767, "sensitivityIso": 3108157228}, "frameSyncMode": 0, "lensPosAutoInfinity": 64, "lensPosAutoMacro": 183, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 25, "sceneMode": 183, "sharpness": 0, "strobeConfig": {"activeLevel": 104, "enable": 0, "gpioNumber": 
-67}, "strobeTimings": {"durationUs": 4, "exposureBeginOffsetUs": 22767, "exposureEndOffsetUs": 17}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": {"index": 0, "value": 30.0}}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [640, 480]}}, "type": null}, {"enableUndistortion": null, "fps": {"value": {"index": 0, "value": 30.0}}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [300, 300]}}, "type": null}, {"enableUndistortion": null, "fps": {"value": {"index": 0, "value": 30.0}}, "resizeMode": 1, "size": {"value": {"index": 0, "value": [300, 300]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```
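
In the dump above, the `Camera` node's `outputRequests` property mirrors three `requestOutput` calls (a 640x480 stream and two 300x300 streams at 30 FPS, with resize modes 0 = CROP and 1 = STRETCH), and each dynamic output is wired to its own XLinkOut node that streams frames to the host.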

## Source code

#### Python

```python
#!/usr/bin/env python3

import sys
import time

import cv2
import depthai as dai

def exit_usage() -> None:
    print(
        "WRONG USAGE! correct usage example:\n"
        "python camera_multiple_outputs.py 640 480 0 30 CAM_A 300 300 0 30 CAM_A 300 300 1 30 CAM_A \n"
        "where 0 is resize mode: 0 == CROP, 1 == STRETCH, 2 == LETTERBOX\n"
        "and 30 is FPS"
    )
    sys.exit(1)

class FPSCounter:
    def __init__(self):
        self.frameTimes = []

    def tick(self):
        now = time.time()
        self.frameTimes.append(now)
        self.frameTimes = self.frameTimes[-100:]

    def getFps(self):
        if len(self.frameTimes) <= 1:
            return 0
        # Calculate the FPS
        return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0])

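# Each group of 5 CLI arguments describes one requested output:
#   width height resize_mode fps camera_socket (e.g. 640 480 0 30 CAM_A)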
args = sys.argv[1:]
if len(args) < 5 or len(args) % 5 != 0:
    exit_usage()

with dai.Pipeline() as pipeline:
    cams: dict[dai.CameraBoardSocket, dai.node.Camera] = {}
    queues = []
    for i in range(0, len(args), 5):
        cap = dai.ImgFrameCapability()
        cap.size.fixed((int(args[i]), int(args[i + 1])))
        cropArg = int(args[i + 2])
        if cropArg == 0:
            cap.resizeMode = dai.ImgResizeMode.CROP
        elif cropArg == 1:
            cap.resizeMode = dai.ImgResizeMode.STRETCH
        elif cropArg == 2:
            cap.resizeMode = dai.ImgResizeMode.LETTERBOX
        else:
            exit_usage()
        cap.fps.fixed(float(args[i + 3]))
        camArg = args[i + 4]
        socket: dai.CameraBoardSocket
        if camArg == "CAM_A":
            socket = dai.CameraBoardSocket.CAM_A
        elif camArg == "CAM_B":
            socket = dai.CameraBoardSocket.CAM_B
        elif camArg == "CAM_C":
            socket = dai.CameraBoardSocket.CAM_C
        elif camArg == "CAM_D":
            socket = dai.CameraBoardSocket.CAM_D
        else:
            exit_usage()
        if socket not in cams:
            cams[socket] = pipeline.create(dai.node.Camera).build(socket)
        queues.append(cams[socket].requestOutput(cap, True).createOutputQueue())

    # Connect to device and start pipeline
    pipeline.start()
    fpsCounters = [FPSCounter() for _ in queues]
    while pipeline.isRunning():
        for index, queue in enumerate(queues):
            videoIn = queue.tryGet()
            if videoIn is not None:
                fpsCounters[index].tick()
                assert isinstance(videoIn, dai.ImgFrame)
                print(
                    f"frame {videoIn.getWidth()}x{videoIn.getHeight()} | {videoIn.getSequenceNum()}: exposure={videoIn.getExposureTime()}us, timestamp: {videoIn.getTimestampDevice()}"
                )
                # Get BGR frame from NV12 encoded video frame to show with opencv
                # Visualizing the frame on slower hosts might have overhead
                cvFrame = videoIn.getCvFrame()
                # Draw FPS
                cv2.putText(cvFrame, f"{fpsCounters[index].getFps():.2f} FPS", (2, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                cv2.imshow("video " + str(index), cvFrame)

        if cv2.waitKey(1) == ord("q"):
            break
```
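
Each group of five arguments (`width height resize_mode fps camera_socket`) becomes one `requestOutput` call, and outputs that name the same socket share a single `Camera` node. For example, `python camera_multiple_outputs.py 640 480 0 30 CAM_A 300 300 1 30 CAM_A` (matching the usage string printed by the script) requests a 640x480 CROP stream and a 300x300 STRETCH stream from the same sensor, shown in two separate windows.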

#### C++

```cpp
#include <atomic>
#include <chrono>
#include <csignal>
#include <iostream>
#include <map>
#include <memory>
#include <opencv2/opencv.hpp>
#include <vector>

#include "depthai/depthai.hpp"

std::atomic<bool> quitEvent(false);

void signalHandler(int) {
    quitEvent = true;
}

class FPSCounter {
   public:
    void tick() {
        auto now = std::chrono::steady_clock::now();
        frameTimes.push_back(now);
        if(frameTimes.size() > 100) {
            frameTimes.erase(frameTimes.begin());
        }
    }

    float getFps() {
        if(frameTimes.size() <= 1) return 0.0f;
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(frameTimes.back() - frameTimes.front()).count();
        if(duration == 0) return 0.0f;  // guard against division by zero when frames arrive within the same millisecond
        return (frameTimes.size() - 1) * 1000.0f / duration;
    }

   private:
    std::vector<std::chrono::steady_clock::time_point> frameTimes;
};

void exitUsage() {
    std::cout << "WRONG USAGE! correct usage example:\n"
              << "./camera_multiple_outputs 640 480 0 30 CAM_A 300 300 0 30 CAM_A 300 300 1 30 CAM_A\n"
              << "where 0 is resize mode: 0 == CROP, 1 == STRETCH, 2 == LETTERBOX\n"
              << "and 30 is FPS" << std::endl;
    exit(1);
}

int main(int argc, char* argv[]) {
    signal(SIGTERM, signalHandler);
    signal(SIGINT, signalHandler);

    if(argc < 6 || (argc - 1) % 5 != 0) {
        exitUsage();
    }

    // Create device
    std::shared_ptr<dai::Device> device = std::make_shared<dai::Device>();

    // Create pipeline
    dai::Pipeline pipeline(device);

    // Parse arguments and create cameras
    std::map<dai::CameraBoardSocket, std::shared_ptr<dai::node::Camera>> cams;
    std::vector<std::shared_ptr<dai::MessageQueue>> queues;
    std::vector<FPSCounter> fpsCounters;

    for(int i = 1; i < argc; i += 5) {
        int width = std::stoi(argv[i]);
        int height = std::stoi(argv[i + 1]);
        int resizeMode = std::stoi(argv[i + 2]);
        float fps = std::stof(argv[i + 3]);
        std::string camArg = argv[i + 4];

        // Create capability
        auto cap = std::make_shared<dai::ImgFrameCapability>();
        cap->size.fixed(std::make_pair(width, height));

        // Set resize mode
        switch(resizeMode) {
            case 0:
                cap->resizeMode = dai::ImgResizeMode::CROP;
                break;
            case 1:
                cap->resizeMode = dai::ImgResizeMode::STRETCH;
                break;
            case 2:
                cap->resizeMode = dai::ImgResizeMode::LETTERBOX;
                break;
            default:
                exitUsage();
        }

        cap->fps.fixed(fps);

        // Parse camera socket
        dai::CameraBoardSocket socket;
        if(camArg == "CAM_A")
            socket = dai::CameraBoardSocket::CAM_A;
        else if(camArg == "CAM_B")
            socket = dai::CameraBoardSocket::CAM_B;
        else if(camArg == "CAM_C")
            socket = dai::CameraBoardSocket::CAM_C;
        else if(camArg == "CAM_D")
            socket = dai::CameraBoardSocket::CAM_D;
        else
            exitUsage();

        // Create camera if not exists
        if(cams.find(socket) == cams.end()) {
            cams[socket] = pipeline.create<dai::node::Camera>()->build(socket);
        }

        // Create output queue
        queues.push_back(cams[socket]->requestOutput(*cap, true)->createOutputQueue());
        fpsCounters.push_back(FPSCounter());
    }

    // Start pipeline
    pipeline.start();

    while(pipeline.isRunning() && !quitEvent) {
        for(size_t i = 0; i < queues.size(); i++) {
            auto videoIn = queues[i]->tryGet<dai::ImgFrame>();
            if(videoIn != nullptr) {
                fpsCounters[i].tick();
                std::cout << "frame " << videoIn->getWidth() << "x" << videoIn->getHeight() << " | " << videoIn->getSequenceNum()
                          << ": exposure=" << videoIn->getExposureTime().count()
                          << "us, timestamp: " << videoIn->getTimestampDevice().time_since_epoch().count() << std::endl;

                cv::Mat cvFrame = videoIn->getCvFrame();

                // Draw FPS
                cv::putText(cvFrame,
                            std::to_string(fpsCounters[i].getFps()).substr(0, 4) + " FPS",
                            cv::Point(2, 20),
                            cv::FONT_HERSHEY_SIMPLEX,
                            0.5,
                            cv::Scalar(0, 255, 0));

                cv::imshow("video " + std::to_string(i), cvFrame);
            }
        }

        if(cv::waitKey(1) == 'q') {
            break;
        }
    }

    pipeline.stop();
    pipeline.wait();

    return 0;
}
```

### Need assistance?

Head over to the [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
