RGB & MobileNetSSD @ 4K

This example shows how to run MobileNetv2SSD on the RGB input frame and how to display both the RGB preview and the metadata results from MobileNetv2SSD on the preview. The color camera sensor resolution is set to 4K, while the 300x300 preview stream is used as the neural network input. It's a variation of RGB & MobilenetSSD.
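
The key change from the base sample is in the camera properties: the sensor is configured for 4K output while the preview keeps the 300x300 network input size. These lines are excerpted from the full source below:
Python
camRgb.setPreviewSize(300, 300)    # NN input stays 300x300
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
camRgb.setPreviewKeepAspectRatio(False)  # stretch the full sensor FOV into the square preview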

Setup

Please run the install script to download all required dependencies. Note that this script must be run from the git context, so you have to clone the depthai-python repository first and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
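
To verify the installation, a quick sanity check such as the one below can be run; it is a minimal sketch that only assumes the depthai package installed by the step above:
Python
import depthai as dai

# Print the installed library version and list any connected OAK devices
print(dai.__version__)
print(dai.Device.getAllAvailableDevices())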

Source code

Python
#!/usr/bin/env python3

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

# Get argument first
nnPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_5shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)

xoutVideo = pipeline.create(dai.node.XLinkOut)
xoutPreview = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)

xoutVideo.setStreamName("video")
xoutPreview.setStreamName("preview")
nnOut.setStreamName("nn")

# Properties
camRgb.setPreviewSize(300, 300)    # NN input
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
camRgb.setInterleaved(False)
camRgb.setPreviewKeepAspectRatio(False)
# Define a neural network that will make predictions based on the source frames
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# Linking
camRgb.video.link(xoutVideo.input)
camRgb.preview.link(xoutPreview.input)
camRgb.preview.link(nn.input)
nn.out.link(nnOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the frames and nn data from the outputs defined above
    qVideo = device.getOutputQueue(name="video", maxSize=4, blocking=False)
    qPreview = device.getOutputQueue(name="preview", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    previewFrame = None
    videoFrame = None
    detections = []

    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def displayFrame(name, frame):
        color = (255, 0, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        # Show the frame
        cv2.imshow(name, frame)

    cv2.namedWindow("video", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("video", 1280, 720)
    print("Resize video window with mouse drag!")

    while True:
        # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
        inVideo = qVideo.tryGet()
        inPreview = qPreview.tryGet()
        inDet = qDet.tryGet()

        if inVideo is not None:
            videoFrame = inVideo.getCvFrame()

        if inPreview is not None:
            previewFrame = inPreview.getCvFrame()

        if inDet is not None:
            detections = inDet.detections

        if videoFrame is not None:
            displayFrame("video", videoFrame)

        if previewFrame is not None:
            displayFrame("preview", previewFrame)

        if cv2.waitKey(1) == ord('q'):
            break
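
Assuming the script is saved as rgb_mobilenet_4k.py (the filename here is hypothetical), it can be run as-is or with a custom model blob passed as the first argument, matching the sys.argv handling at the top of the source:
Command Line
python3 rgb_mobilenet_4k.py                      # use the default mobilenet-ssd blob
python3 rgb_mobilenet_4k.py path/to/model.blob   # override the blob path

Note that frameNorm scales the normalized <0..1> detection coordinates to the pixel size of whichever frame it receives, so the same detections can be drawn on both the 300x300 preview and the 4K video frame (for example, xmin = 0.25 maps to x = 960 on a 3840-pixel-wide frame).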

Pipeline
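
The pipeline built by this example, reconstructed from the Linking section of the source above: ColorCamera.video → XLinkOut("video"), ColorCamera.preview → XLinkOut("preview"), and ColorCamera.preview → MobileNetDetectionNetwork → XLinkOut("nn").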

Need assistance?

Head over to the Discussion Forum for technical support or any other questions you might have.