# ImageManip multiple operations

This example showcases multiple [ImageManip](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/image_manip.md)
operations in a row (one after another):

 * Resizing ([resize mode: letterbox](https://docs.luxonis.com/software-v3/depthai/tutorials/resolution-techniques.md))
 * Cropping
 * Vertical flipping
 * Changing the frame type (to NV12)

All of this is done within a single ImageManip node in the pipeline. Note that the order of operations matters, as they are
applied in the order they are added.

## Demo

This example requires the DepthAI v3 API, see [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/image_manip_multi_ops.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 1, "node1Output": "out", "node1OutputGroup": "", "node2Id": 2, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 1, "node2Input": "inputImage", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[2, {"alias": "", "id": 2, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 7, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_1_out"}}], [1, {"alias": "", "id": 1, "ioInfo": [[["", "out"], {"blocking": false, "group": "", "id": 6, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "inputImage"], {"blocking": true, "group": "", "id": 5, "name": "inputImage", "queueSize": 3, "type": 3, "waitForMessage": false}], [["", "inputConfig"], {"blocking": true, "group": "", "id": 4, "name": "inputConfig", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "ImageManip", "parentId": -1, "properties": {"backend": 0, "initialConfig": {"base": {"background": 0, "backgroundB": 0, "backgroundG": 0, "backgroundR": 0, "center": true, "colormap": 0, "operations": [{"op": {"index": 0, "value": {"normalized": false, "offsetX": -50.0, "offsetY": -100.0}}}, {"op": {"index": 7, "value": {"center": false, "height": 500.0, "normalized": false, "width": 500.0}}}, {"op": {"index": 3, "value": {"center": true, "direction": 1}}}], "outputHeight": 710, "outputWidth": 1270, "resizeMode": 2, "undistort": false}, "outputFrameType": 22, "reusePreviousImage": false, "skipCurrentImage": false}, "numFramesPool": 4, 
"outputFrameSize": 2709360, "performanceMode": 0}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 0, "aeRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 0, "y": 0}, "antiBandingMode": 0, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 0, "brightness": 0, "captureIntent": 0, "chromaDenoise": 0, "cmdMask": 0, "contrast": 0, "controlMode": 0, "effectMode": 0, "enableHdr": false, "expCompensation": 0, "expManual": {"exposureTimeUs": 0, "frameDurationUs": 0, "sensitivityIso": 0}, "frameSyncMode": 0, "lensPosAutoInfinity": 0, "lensPosAutoMacro": 0, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 0, "sceneMode": 0, "sharpness": 0, "strobeConfig": {"activeLevel": 0, "enable": 0, "gpioNumber": 0}, "strobeTimings": {"durationUs": 0, "exposureBeginOffsetUs": 0, "exposureEndOffsetUs": 0}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, 
"resizeMode": 0, "size": {"value": {"index": 0, "value": [1920, 1080]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```

## Source code

#### Python

```python
import depthai as dai
import cv2

pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
manip = pipeline.create(dai.node.ImageManip)

manip.initialConfig.setOutputSize(1270, 710, dai.ImageManipConfig.ResizeMode.LETTERBOX)
manip.initialConfig.addCrop(50, 100, 500, 500)
manip.initialConfig.addFlipVertical()
manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
manip.setMaxOutputFrameSize(2709360)

camRgb.requestOutput((1920, 1080)).link(manip.inputImage)

out = manip.out.createOutputQueue()

pipeline.start()

print(manip.initialConfig)

while True:
    inFrame = out.get()
    if inFrame is not None:
        cv2.imshow("Show frame", inFrame.getCvFrame())
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
```

#### C++

```cpp
#include <atomic>
#include <csignal>
#include <memory>

#include "depthai/depthai.hpp"
#include "depthai/pipeline/datatype/ImgFrame.hpp"
#include "depthai/pipeline/node/host/Display.hpp"

std::atomic<bool> quitEvent(false);

// Invoked on SIGINT/SIGTERM; flips the flag so the main loop exits cleanly.
void signalHandler(int) {
    quitEvent.store(true);
}

int main(int argc, char** argv) {
    // Allow Ctrl+C / SIGTERM to terminate the display loop gracefully.
    signal(SIGTERM, signalHandler);
    signal(SIGINT, signalHandler);

    // Connect to the default device, or to the one named on the command line.
    std::shared_ptr<dai::Device> device = nullptr;
    if(argc <= 1) {
        device = std::make_shared<dai::Device>();
    } else {
        device = std::make_shared<dai::Device>(argv[1]);
    }
    dai::Pipeline pipeline(device);

    auto camRgb = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
    auto manip = pipeline.create<dai::node::ImageManip>();

    // Operations run in the order they are added to the config:
    // 1) letterbox-resize to 1270x710,
    // 2) crop a 500x500 region at offset (50, 100),
    // 3) flip vertically,
    // 4) convert the frame type to NV12.
    // This matches the Python example and the pipeline JSON above.
    manip->initialConfig->setOutputSize(1270, 710, dai::ImageManipConfig::ResizeMode::LETTERBOX);
    manip->initialConfig->addCrop(50, 100, 500, 500);
    manip->initialConfig->addFlipVertical();
    manip->initialConfig->setFrameType(dai::ImgFrame::Type::NV12);
    // Output buffer must be large enough for the configured output frame.
    manip->setMaxOutputFrameSize(2709360);

    auto* rgbOut = camRgb->requestOutput({1920, 1080});
    rgbOut->link(manip->inputImage);
    auto outputQueue = manip->out.createOutputQueue();
    pipeline.start();
    // Show manipulated frames until the device stops, a signal arrives, or 'q' is pressed.
    while(pipeline.isRunning() && !quitEvent) {
        auto imgFrame = outputQueue->get<dai::ImgFrame>();
        cv::imshow("Manipulated Frame", imgFrame->getCvFrame());
        int key = cv::waitKey(1);
        if(key == 'q') {
            break;
        }
    }

    pipeline.stop();
    pipeline.wait();

    return 0;
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
