# Warp Mesh

This example shows the usage of the [Warp](https://docs.luxonis.com/software-v3/depthai/depthai-components/nodes/warp.md) node to warp the
input image frame.

## Demo

This example requires the DepthAI v3 API; see the [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).
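
Once installed, a quick way to confirm that the v3 API is available is to check the package version (a minimal sketch; the exact version string depends on your installation):

```python
import depthai as dai

# The v3 API ships with depthai 3.x; an older 2.x release will not provide
# nodes such as dai.node.Warp used in this example.
print(dai.__version__)
```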

## Pipeline

### examples/warp_mesh.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 1, "node1Output": "out", "node1OutputGroup": "", "node2Id": 4, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 2, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 1, "node2Input": "inputImage", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[4, {"alias": "", "id": 4, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 7, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_1_out"}}], [2, {"alias": "", "id": 2, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 6, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_0"}}], [1, {"alias": "", "id": 1, "ioInfo": [[["", "out"], {"blocking": false, "group": "", "id": 5, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "inputImage"], {"blocking": true, "group": "", "id": 4, "name": "inputImage", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Warp", "parentId": -1, "properties": {"interpolation": 0, "meshHeight": 3, "meshUri": "asset:mesh", "meshWidth": 3, "numFramesPool": 4, "outputFrameSize": 921600, "outputHeight": 480, "outputWidth": 640, "warpHwIds": []}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 100663297, "aeRegion": {"height": 0, "priority": 3429757504, "width": 0, "x": 4163, "y": 1}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 23127, "y": 0}, "antiBandingMode": 109, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 136, "brightness": -105, "captureIntent": 204, "chromaDenoise": 0, "cmdMask": 0, "contrast": -33, "controlMode": 87, "effectMode": 90, "enableHdr": false, "expCompensation": 39, "expManual": {"exposureTimeUs": 23127, "frameDurationUs": 23127, "sensitivityIso": 3429757612}, "frameSyncMode": 0, "lensPosAutoInfinity": 192, "lensPosAutoMacro": 242, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": -34, "sceneMode": 242, "sharpness": 0, "strobeConfig": {"activeLevel": 232, "enable": 0, "gpioNumber": -8}, "strobeTimings": {"durationUs": 16, 
"exposureBeginOffsetUs": 23127, "exposureEndOffsetUs": 17}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [1280, 800]}}, "type": 8}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```
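
The JSON above is a serialized representation of the pipeline built in the Python example below. As a hedged sketch (assuming `serializeToJson()` is still exposed in the v3 API as it was in earlier DepthAI releases), a similar dump can be produced from a constructed pipeline:

```python
import depthai as dai

pipeline = dai.Pipeline()
# ... build the pipeline as in the example below ...

# serializeToJson() (assumption: carried over from the DepthAI v2 API)
# returns the pipeline graph - nodes, connections and properties - as JSON.
print(pipeline.serializeToJson())
```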

## Source code

#### Python

```python
#!/usr/bin/env python3
import cv2
import depthai as dai

# This script demonstrates how to use a warp mesh in DepthAI to transform
# incoming camera frames. A warp mesh is a grid of points where each point
# indicates from which location in the source image we sample pixels to place
# in the corresponding location of the output image.

# 3x3 WARP MESH (SAMPLE SHIFTS):
# ------------------------------
# Conceptually, we have 9 points arranged like this:

#     p0 ----- p1 ----- p2
#       |       |       |
#     p3 ----- p4 ----- p5
#       |       |       |
#     p6 ----- p7 ----- p8

# Each point is defined by an (x, y) coordinate in the source image.
# If all points were placed in their "natural" positions, we'd get
# an identity (no distortion) mapping. By shifting one or more points,
# we create warping or perspective effects.

# For example, if p3 is shifted horizontally, then at that grid
# position, we "pull" pixels from a different horizontal location,
# causing a horizontal distortion across that row.

# Below, we show how to create a slightly shifted 3x3 mesh.
# Feel free to adjust the coordinates to see how the output changes.
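
# As a reference point, an identity (no-warp) mesh for an arbitrary grid size
# can be built programmatically. The helper below is only an illustrative
# sketch and is not used further in this example: each mesh point is placed
# at its "natural" source coordinate, so warping with it reproduces the input.
def make_identity_mesh(width, height, meshWidth, meshHeight):
    mesh = []
    for row in range(meshHeight):
        for col in range(meshWidth):
            x = width * col / (meshWidth - 1)
            y = height * row / (meshHeight - 1)
            mesh.append(dai.Point2f(x, y))
    return mesh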

# Create pipeline
pipeline = dai.Pipeline()

width, height = 1280, 800
camRgb = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)

platform = pipeline.getDefaultDevice().getPlatform()
imgType = dai.ImgFrame.Type.BGR888p if platform == dai.Platform.RVC2 else dai.ImgFrame.Type.NV12

cameraOutput = camRgb.requestOutput((width, height), type=imgType)
originalFrameQueue = cameraOutput.createOutputQueue()

# Define a 3x3 warp mesh: the middle row's outer points are shifted horizontally
# towards the center, and the top and bottom middle points are shifted vertically
# towards the center.
# Each point tells the warp node from which source coordinate
# to sample pixels for the final output.
p0_3x3 = dai.Point2f(0, 0)
p1_3x3 = dai.Point2f(width / 2, 200) # Identity would be (width / 2, 0)
p2_3x3 = dai.Point2f(width, 0)

p3_3x3 = dai.Point2f(300, height / 2) # Identity would be (0, height / 2)
p4_3x3 = dai.Point2f(width / 2, height / 2)
p5_3x3 = dai.Point2f(width - 300, height / 2) # Identity would be (width, height / 2)

p6_3x3 = dai.Point2f(0, height)
p7_3x3 = dai.Point2f(width / 2, height - 200) # Identity would be (width / 2, height)
p8_3x3 = dai.Point2f(width, height)

# Create and configure the Warp node
warp = pipeline.create(dai.node.Warp)
warp.setWarpMesh([
    p0_3x3, p1_3x3, p2_3x3,
    p3_3x3, p4_3x3, p5_3x3,
    p6_3x3, p7_3x3, p8_3x3
], 3, 3)

# Set output size and frame limits
warpOutputSize = (640, 480)
warp.setOutputSize(warpOutputSize)
warp.setMaxOutputFrameSize(warpOutputSize[0] * warpOutputSize[1] * 3) # 640 * 480 * 3 bytes (3 bytes per pixel)
warp.setInterpolation(dai.Interpolation.BILINEAR)

cameraOutput.link(warp.inputImage)
warpQueue = warp.out.createOutputQueue()

# Start the pipeline
pipeline.start()
with pipeline:
    while True:
        # Get and show original frame
        originalFrame = originalFrameQueue.get()
        assert isinstance(originalFrame, dai.ImgFrame)
        cv2.imshow("Original", originalFrame.getCvFrame())

        # Get and show warped frame
        warpedFrame = warpQueue.get()
        assert isinstance(warpedFrame, dai.ImgFrame)
        if platform == dai.Platform.RVC4:
            warpedFrame.setType(dai.ImgFrame.Type.GRAY8) # Chroma plane warping is not yet supported on RVC4
        cv2.imshow("Warped", warpedFrame.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
```

#### C++

WIP

### Need assistance?

Head over to the [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
