RGB & MobilenetSSD
This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB preview and the metadata results from the MobileNetv2SSD on that preview.
Setup
Please run the install script to download all required dependencies. Please note that this script must be run from the repository context, so you have to clone the depthai-python repository first and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
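With the dependencies installed, you can launch the example. The script location below is an assumption based on the layout of the depthai-python examples folder and may differ between releases, so adjust the path to match your checkout. The optional -s/--sync flag displays the exact frames that were passed through the network instead of the freshest preview.
Command Line
python3 MobileNet/rgb_mobilenet.py
python3 MobileNet/rgb_mobilenet.py -s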
Source code
Python
#!/usr/bin/env python3

from pathlib import Path
import cv2
import depthai as dai
import numpy as np
import time
import argparse

nnPathDefault = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
parser = argparse.ArgumentParser()
parser.add_argument('nnPath', nargs='?', help="Path to mobilenet detection network blob", default=nnPathDefault)
parser.add_argument('-s', '--sync', action="store_true", help="Sync RGB output with NN output", default=False)
args = parser.parse_args()

if not Path(args.nnPath).exists():
    import sys
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)
nnNetworkOut = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("rgb")
nnOut.setStreamName("nn")
nnNetworkOut.setStreamName("nnNetwork")

# Properties
camRgb.setPreviewSize(300, 300)
camRgb.setInterleaved(False)
camRgb.setFps(40)
# Define a neural network that will make predictions based on the source frames
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(args.nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# Linking
if args.sync:
    # The passthrough output re-emits the exact frame that went through the
    # network, so the displayed frame always matches its detections
    nn.passthrough.link(xoutRgb.input)
else:
    # Display the freshest preview frame; detections may lag slightly behind it
    camRgb.preview.link(xoutRgb.input)

camRgb.preview.link(nn.input)
nn.out.link(nnOut.input)
nn.outNetwork.link(nnNetworkOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    qNN = device.getOutputQueue(name="nnNetwork", maxSize=4, blocking=False)

    frame = None
    detections = []
    startTime = time.monotonic()
    counter = 0
    color2 = (255, 255, 255)

    # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
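    # Worked example: on a 300x300 preview frame, a normalized bbox of
    # (xmin=0.1, ymin=0.2, xmax=0.5, ymax=0.6) maps to pixel coordinates
    # (30, 60, 150, 180) - even indices scale by width, odd ones by height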
    def displayFrame(name, frame):
        color = (255, 0, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        # Show the frame
        cv2.imshow(name, frame)

    printOutputLayersOnce = True

    while True:
        if args.sync:
            # Use blocking get() call to catch frame and inference result synced
            inRgb = qRgb.get()
            inDet = qDet.get()
            inNN = qNN.get()
        else:
            # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
            inRgb = qRgb.tryGet()
            inDet = qDet.tryGet()
            inNN = qNN.tryGet()

        if inRgb is not None:
            frame = inRgb.getCvFrame()
            cv2.putText(frame, "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)),
                        (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color2)

        if inDet is not None:
            detections = inDet.detections
            counter += 1

        if printOutputLayersOnce and inNN is not None:
            toPrint = 'Output layer names:'
            for ten in inNN.getAllLayerNames():
                toPrint = f'{toPrint} {ten},'
            print(toPrint)
            printOutputLayersOnce = False

        # If the frame is available, draw bounding boxes on it and show the frame
        if frame is not None:
            displayFrame("rgb", frame)

        if cv2.waitKey(1) == ord('q'):
            break
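The detections arriving on the "nn" queue are plain objects with label, confidence and normalized bounding-box fields, so post-processing is ordinary Python. As a minimal sketch, the helper below (filterDetections is our name for illustration, not part of the depthai API) keeps only "person" hits above a stricter confidence threshold, reusing the labelMap list from the example above:
Python
# Hypothetical helper, not part of the depthai API - assumes the labelMap
# list defined in the example above and a list of detection objects with
# label and confidence fields
def filterDetections(detections, wantedLabel="person", minConfidence=0.7):
    wanted = labelMap.index(wantedLabel)  # detection.label indexes into labelMap
    return [d for d in detections if d.label == wanted and d.confidence >= minConfidence]

You could call it right after detections = inDet.detections in the main loop to draw only the filtered subset.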
Pipeline
The ColorCamera preview feeds the MobileNetDetectionNetwork; the network's out, outNetwork and passthrough outputs (or the camera preview directly, when not synced) reach the host over the "nn", "nnNetwork" and "rgb" XLink streams.
Need assistance?
Head over to the Discussion Forum for technical support or any other questions you might have.