ImageManip Tiling

Frame tiling can be useful, e.g., for feeding a large frame into a NeuralNetwork whose input size is smaller. In such a case, you can tile the large frame into multiple smaller ones and feed the smaller frames to the NeuralNetwork.

In this example we use 2 ImageManip for splitting the original 1000x500 preview frame into two 500x500 frames.

Demo

Tiling preview into 2 frames/tiles

Setup

Please run the install script to download all required dependencies. Please note that this script must be run from a git context, so you have to download the depthai-python repository first and then run the script:

git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py

For additional information, please follow installation guide

Source code

Also available on GitHub

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
#!/usr/bin/env python3

import cv2
import depthai as dai

# Assemble the pipeline: one color camera feeding two ImageManip crops.
pipeline = dai.Pipeline()

cam = pipeline.create(dai.node.ColorCamera)
cam.setPreviewSize(1000, 500)
cam.setInterleaved(False)

# Largest frame any manip may emit: 3 bytes per pixel (BGR) times the
# full preview area.
maxFrameSize = 3 * cam.getPreviewWidth() * cam.getPreviewHeight()


def makeCrop(xmin, ymin, xmax, ymax):
    """Create an ImageManip cropping the camera preview to the given
    normalized rectangle and return the configured node."""
    manip = pipeline.create(dai.node.ImageManip)
    manip.initialConfig.setCropRect(xmin, ymin, xmax, ymax)
    manip.setMaxOutputFrameSize(maxFrameSize)
    cam.preview.link(manip.inputImage)
    return manip


def makeOutput(source, streamName):
    """Attach an XLinkOut with the given stream name to ``source``."""
    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName(streamName)
    source.out.link(xout.input)


# Split the original 1000x500 preview into two 500x500 tiles
# (left half and right half), each streamed to the host.
makeOutput(makeCrop(0, 0, 0.5, 1), 'out1')
makeOutput(makeCrop(0.5, 0, 1, 1), 'out2')

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Host-side queues delivering the tile frames produced above
    q1 = device.getOutputQueue(name="out1", maxSize=4, blocking=False)
    q2 = device.getOutputQueue(name="out2", maxSize=4, blocking=False)

    while True:
        # Show whichever tiles have a fresh frame waiting.
        for title, queue in (("Tile 1", q1), ("Tile 2", q2)):
            if queue.has():
                cv2.imshow(title, queue.get().getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break

Also available on GitHub

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
#include <iostream>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

int main() {
    using namespace std;

    // Create pipeline
    dai::Pipeline pipeline;

    // Color camera producing a 1000x500 preview that we split into two tiles.
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    camRgb->setPreviewSize(1000, 500);
    camRgb->setInterleaved(false);
    // Upper bound on a manip output buffer: width * height * 3 channels (BGR).
    // BUGFIX: was height * height, which under-sized the buffer for this
    // wide (1000x500) preview; the Python example computes width * height.
    auto maxFrameSize = camRgb->getPreviewWidth() * camRgb->getPreviewHeight() * 3;

    // In this example we use 2 imageManips for splitting the original 1000x500
    // preview frame into 2 500x500 frames
    auto manip1 = pipeline.create<dai::node::ImageManip>();
    manip1->initialConfig.setCropRect(0, 0, 0.5, 1);
    // Flip functionality
    manip1->initialConfig.setHorizontalFlip(true);
    manip1->setMaxOutputFrameSize(maxFrameSize);
    camRgb->preview.link(manip1->inputImage);

    auto manip2 = pipeline.create<dai::node::ImageManip>();
    manip2->initialConfig.setCropRect(0.5, 0, 1, 1);
    // Flip functionality
    // BUGFIX: this flip was applied to manip1 (giving it both flips and
    // manip2 none); it belongs to manip2 per this section's intent.
    manip2->initialConfig.setVerticalFlip(true);
    manip2->setMaxOutputFrameSize(maxFrameSize);
    camRgb->preview.link(manip2->inputImage);

    // XLink outputs stream both tiles back to the host.
    auto xout1 = pipeline.create<dai::node::XLinkOut>();
    xout1->setStreamName("out1");
    manip1->out.link(xout1->input);

    auto xout2 = pipeline.create<dai::node::XLinkOut>();
    xout2->setStreamName("out2");
    manip2->out.link(xout2->input);

    // Connect to device and start the pipeline.
    dai::Device device(pipeline);

    // Non-blocking host-side queues for the two tile streams.
    auto q1 = device.getOutputQueue("out1", 8, false);
    auto q2 = device.getOutputQueue("out2", 8, false);

    while(true) {
        // tryGet returns nullptr when no frame is waiting, so the loop
        // never blocks and the GUI stays responsive.
        auto in1 = q1->tryGet<dai::ImgFrame>();
        if(in1) {
            cv::imshow("Tile 1", in1->getCvFrame());
        }

        auto in2 = q2->tryGet<dai::ImgFrame>();
        if(in2) {
            cv::imshow("Tile 2", in2->getCvFrame());
        }

        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') return 0;
    }
    return 0;
}

Got questions?

Head over to Discussion Forum for technical support or any other questions you might have.