# Camera output

This example showcases DepthAI v3's functionality to request an output stream directly from the
[Camera](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/camera.md) node, instead of having to create and
configure an [ImageManip](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/image_manip.md) node.

```python
output = camera_node.requestOutput(
    size=(640, 480),
    type=dai.ImgFrame.Type.BGR888p,
    resizeMode=dai.ImgResizeMode.CROP,
    fps=15
)
```

Resize mode can be one of CROP, STRETCH, or LETTERBOX, which come into play if there's a mismatch between the sensor aspect ratio
(AR) and the requested aspect ratio. For more information (pros/cons of each) check the [Input frame AR
mismatch](https://docs.luxonis.com/software-v3/depthai/tutorials/resolution-techniques.md) documentation.

[Camera Multiple Outputs](https://docs.luxonis.com/software-v3/depthai/examples/camera/camera_multiple_outputs.md) example
showcases how to request multiple outputs from the Camera node.

This example requires the DepthAI v3 API, see [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/camera_output.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 1, "node2Input": "in", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[1, {"alias": "", "id": 1, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 4, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_0"}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 100663297, "aeRegion": {"height": 0, "priority": 2712510512, "width": 0, "x": 4163, "y": 1}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 23589, "y": 0}, "antiBandingMode": 173, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 120, "brightness": -86, "captureIntent": 161, "chromaDenoise": 0, "cmdMask": 0, "contrast": 43, "controlMode": 37, "effectMode": 92, "enableHdr": false, "expCompensation": -41, "expManual": 
{"exposureTimeUs": 23589, "frameDurationUs": 23589, "sensitivityIso": 2712510620}, "frameSyncMode": 0, "lensPosAutoInfinity": 176, "lensPosAutoMacro": 160, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 25, "sceneMode": 160, "sharpness": 0, "strobeConfig": {"activeLevel": 216, "enable": 0, "gpioNumber": -90}, "strobeTimings": {"durationUs": 4, "exposureBeginOffsetUs": 23589, "exposureEndOffsetUs": 17}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [640, 400]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```

## Source code

#### Python

```python
#!/usr/bin/env python3

import cv2
import depthai as dai

# Build a pipeline whose Camera node serves a 640x400 stream directly,
# without creating an intermediate ImageManip node.
with dai.Pipeline() as pipeline:
    # Camera source; requestOutput hands back an output we can attach
    # a host-side queue to.
    camera = pipeline.create(dai.node.Camera).build()
    frameQueue = camera.requestOutput((640, 400)).createOutputQueue()

    # Run until the pipeline stops or the user presses 'q'.
    pipeline.start()
    while pipeline.isRunning():
        frame = frameQueue.get()
        assert isinstance(frame, dai.ImgFrame)
        cv2.imshow("video", frame.getCvFrame())

        if cv2.waitKey(1) == ord("q"):
            break
```

#### C++

```cpp
#include <atomic>
#include <csignal>
#include <iostream>
#include <memory>
#include <opencv2/opencv.hpp>

#include "depthai/depthai.hpp"

// Set when SIGINT/SIGTERM arrives; polled by the main loop to exit cleanly.
std::atomic<bool> quitEvent(false);

// Signal handler: only flips the atomic flag (async-signal-safe).
void signalHandler(int) {
    quitEvent.store(true);
}

int main() {
    signal(SIGTERM, signalHandler);
    signal(SIGINT, signalHandler);

    // Create device
    std::shared_ptr<dai::Device> device = std::make_shared<dai::Device>();

    // Create pipeline
    dai::Pipeline pipeline(device);

    // Create nodes
    auto cam = pipeline.create<dai::node::Camera>()->build();
    auto videoQueue = cam->requestOutput(std::make_pair(640, 400))->createOutputQueue();

    // Start pipeline
    pipeline.start();

    while(pipeline.isRunning() && !quitEvent) {
        auto videoIn = videoQueue->get<dai::ImgFrame>();
        if(videoIn == nullptr) continue;

        cv::imshow("video", videoIn->getCvFrame());

        if(cv::waitKey(1) == 'q') {
            break;
        }
    }

    pipeline.stop();
    pipeline.wait();

    return 0;
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
