RGB Encoding & Mono & MobilenetSSD
This example runs MobileNet-SSD object detection on the right mono camera and displays the results on the host, while at the same time encoding the 1080p RGB stream to H.265 on the device and saving it to video.h265.
Similar samples:
- RGB Encoding
- RGB & Mono Encoding
- Encoding Max Limit
- RGB Encoding & MobilenetSSD
- RGB Encoding & Mono with MobilenetSSD & Depth
Demo
The demo shows the right mono stream and the 300x300 NN input stream with MobileNet-SSD detections overlaid, while the 1080p RGB stream is encoded to video.h265 in the background.
Setup
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
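To run the sample, execute it from the examples folder (the file name below is an assumption based on the depthai-python repository layout); an optional path to a custom .blob can be passed as the first argument:

python3 rgb_encoding_mono_mobilenet.py [path/to/mobilenet-ssd.blob]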
Source code
Python (also available on GitHub)
#!/usr/bin/env python3

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

# Get the model blob path (can be overridden by the first CLI argument)
nnPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
videoEncoder = pipeline.create(dai.node.VideoEncoder)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
manip = pipeline.create(dai.node.ImageManip)

videoOut = pipeline.create(dai.node.XLinkOut)
xoutRight = pipeline.create(dai.node.XLinkOut)
manipOut = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)

videoOut.setStreamName('h265')
xoutRight.setStreamName("right")
manipOut.setStreamName("manip")
nnOut.setStreamName("nn")
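
# The four XLinkOut nodes above carry device-side streams to the host over the XLink
# connection; each stream name set here is the handle later passed to
# device.getOutputQueue() on the host side.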

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
monoRight.setCamera("right")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN)
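
# (Optional, not in the original sample) The preset selects H.265 Main profile at
# 30 FPS with a default bitrate. If you need to trade file size against quality,
# VideoEncoder also exposes an explicit bitrate setter:
# videoEncoder.setBitrateKbps(4000)  # illustrative value, adjust to your bandwidth budget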

nn.setConfidenceThreshold(0.5)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# The NN model expects BGR input. By default, the ImageManip output type is the same as its input (grayscale in this case)
manip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
manip.initialConfig.setResize(300, 300)
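
# 300x300 is the fixed input resolution of this MobileNet-SSD blob. The host-side
# overlay code further below assumes the NN input maps to a centered square of the
# 16:9 mono frame, which is why detections are shifted by offsetX before drawing.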

# Linking
camRgb.video.link(videoEncoder.input)
videoEncoder.bitstream.link(videoOut.input)
monoRight.out.link(manip.inputImage)
manip.out.link(nn.input)
monoRight.out.link(xoutRight.input)
manip.out.link(manipOut.input)
nn.out.link(nnOut.input)
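
# A single output may feed several inputs: monoRight.out drives both the ImageManip
# node and the "right" XLinkOut, and manip.out drives both the detection network and
# the "manip" XLinkOut, so the host can preview exactly what the network sees.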

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Queues
    queue_size = 8
    qRight = device.getOutputQueue("right", queue_size)
    qManip = device.getOutputQueue("manip", queue_size)
    qDet = device.getOutputQueue("nn", queue_size)
    qRgbEnc = device.getOutputQueue('h265', maxSize=30, blocking=True)

    frame = None
    frameManip = None
    detections = []
    offsetX = (monoRight.getResolutionWidth() - monoRight.getResolutionHeight()) // 2
    color = (255, 0, 0)
    croppedFrame = np.zeros((monoRight.getResolutionHeight(), monoRight.getResolutionHeight()))
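
    # Note (not in the original sample): croppedFrame is a 720x720 dummy array used by
    # frameNorm() below only for its pixel dimensions; offsetX = (1280 - 720) // 2 = 280
    # then shifts the resulting boxes horizontally into the full 16:9 mono frame.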

    # Map a bounding box from normalized [0..1] coordinates to pixel coordinates of `frame`
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
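
    # For example, frameNorm(croppedFrame, (0.25, 0.25, 0.75, 0.75)) on the 720x720
    # array yields the pixel box (180, 180, 540, 540).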

    videoFile = open('video.h265', 'wb')
    cv2.namedWindow("right", cv2.WINDOW_NORMAL)
    cv2.namedWindow("manip", cv2.WINDOW_NORMAL)

    while True:
        inRight = qRight.tryGet()
        inManip = qManip.tryGet()
        inDet = qDet.tryGet()

        while qRgbEnc.has():
            qRgbEnc.get().getData().tofile(videoFile)
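
        # The 'h265' queue is blocking with maxSize=30, so encoded packets are not
        # dropped; draining it every iteration prevents the device-side encoder from
        # stalling once the queue fills up.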

        if inRight is not None:
            frame = inRight.getCvFrame()

        if inManip is not None:
            frameManip = inManip.getCvFrame()

        if inDet is not None:
            detections = inDet.detections

        if frame is not None:
            for detection in detections:
                bbox = frameNorm(croppedFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                bbox[::2] += offsetX
                cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
            # Show the frame
            cv2.imshow("right", frame)

        if frameManip is not None:
            for detection in detections:
                bbox = frameNorm(frameManip, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                cv2.putText(frameManip, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.putText(frameManip, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.rectangle(frameManip, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
            # Show the frame
            cv2.imshow("manip", frameManip)

        if cv2.waitKey(1) == ord('q'):
            break

    print("To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using the command below:")
    print("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4")
Pipeline
Pipeline graph (diagram): ColorCamera.video → VideoEncoder → XLinkOut ("h265"); MonoCamera (right) → ImageManip → MobileNetDetectionNetwork → XLinkOut ("nn"); monoRight.out and manip.out are also linked to the "right" and "manip" XLinkOut streams.
Need assistance?
Head over to Discussion Forum for technical support or any other questions you might have.