Demo
This example requires the DepthAI v3 API; see the installation instructions.

Pipeline
Source code
Python
#!/usr/bin/env python3
import cv2
import depthai as dai
import numpy as np
from pathlib import Path

# Get the absolute path of the current script's directory
script_dir = Path(__file__).resolve().parent
examplesRoot = (script_dir / Path('../')).resolve()  # Parent (examples) directory
models = examplesRoot / 'models'
lenaImagePath = models / 'lenna.png'

# Load the image with OpenCV, resize it to the model input size and convert BGR -> RGB
lenaImage = cv2.imread(str(lenaImagePath.resolve()))
lenaImage = cv2.resize(lenaImage, (256, 256))
lenaImage = cv2.cvtColor(lenaImage, cv2.COLOR_BGR2RGB)
lenaImage = np.array(lenaImage)

device = dai.Device()
platform = device.getPlatform()
if platform == dai.Platform.RVC2:
    # RVC2 expects planar (CHW) U8 input
    lenaImage = np.transpose(lenaImage, (2, 0, 1))
    nnTensorType = dai.TensorInfo.DataType.U8F
elif platform == dai.Platform.RVC4:
    # RVC4 expects FP16 input with a leading batch dimension
    lenaImage = np.expand_dims(lenaImage, axis=0)
    nnTensorType = dai.TensorInfo.DataType.FP16

inputNNData = dai.NNData()
inputNNData.addTensor("image1", lenaImage, dataType=nnTensorType)
inputNNData.addTensor("image2", lenaImage, dataType=nnTensorType)

with dai.Pipeline(device) as pipeline:
    model = dai.NNModelDescription("depthai-test-models/simple-concatenate-model")
    model.platform = platform.name

    nnArchive = dai.NNArchive(dai.getModelFromZoo(model))

    neuralNetwork = pipeline.create(dai.node.NeuralNetwork)
    neuralNetwork.setNNArchive(nnArchive)
    nnDataInputQueue = neuralNetwork.input.createInputQueue()
    qNNData = neuralNetwork.out.createOutputQueue()
    pipeline.start()
    while pipeline.isRunning():
        nnDataInputQueue.send(inputNNData)
        inNNData: dai.NNData = qNNData.get()
        tensor: np.ndarray = inNNData.getFirstTensor()
        # Drop the batch dimension
        tensor = tensor.squeeze().astype(np.uint8)
        # If the channel dimension comes first, move it to the end (HWC) for OpenCV
        if tensor.shape[0] == 3:
            tensor = np.transpose(tensor, (1, 2, 0))
        cv2.imshow("Combined image", tensor)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
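The platform check above is the important detail: RVC2 devices expect planar (CHW) U8 tensors, while RVC4 devices expect FP16 tensors with a leading batch dimension. The helper below is a minimal sketch that only reuses the calls from the example (the function name is illustrative, not part of the DepthAI API) and makes that layout decision reusable for other input images:

import numpy as np
import depthai as dai

def to_nn_tensor(img_rgb: np.ndarray, platform: dai.Platform):
    """Return (tensor, dtype) in the layout the given platform expects.

    Sketch based on the branching in the example above; assumes an
    HWC uint8 RGB image already resized to the model's input size.
    """
    if platform == dai.Platform.RVC2:
        # RVC2: planar (CHW) unsigned 8-bit input
        return np.transpose(img_rgb, (2, 0, 1)), dai.TensorInfo.DataType.U8F
    if platform == dai.Platform.RVC4:
        # RVC4: add a leading batch dimension, sent as FP16
        return np.expand_dims(img_rgb, axis=0), dai.TensorInfo.DataType.FP16
    raise ValueError(f"Unsupported platform: {platform}")

With such a helper, building the input message reduces to tensor, dtype = to_nn_tensor(lenaImage, platform) followed by inputNNData.addTensor("image1", tensor, dataType=dtype), exactly as in the example above.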