Mono & MobilenetSSD

This example shows how to run MobileNetv2SSD on the right grayscale camera and how to display the neural network results on a preview of the right camera stream.

Similar samples:

Demo

Setup

Please run the install script to download all required dependencies. Note that this script must be run from within the cloned repository, so you need to clone the depthai-python repository first and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
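
Once the dependencies are installed, you can run the example. The script also accepts an optional first argument with a path to a different model blob (see the source below). A minimal usage sketch, assuming the source is saved locally as mono_mobilenet.py (the file name used in the depthai-python examples):
Command Line
python3 mono_mobilenet.py
python3 mono_mobilenet.py /path/to/custom_mobilenet-ssd.blob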

Source code

Python
#!/usr/bin/env python3

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

# Get argument first
nnPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoRight = pipeline.create(dai.node.MonoCamera)
manip = pipeline.create(dai.node.ImageManip)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
manipOut = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)

manipOut.setStreamName("right")
nnOut.setStreamName("nn")

# Properties
monoRight.setCamera("right")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)

# Convert the grayscale frame into the nn-acceptable form
manip.initialConfig.setResize(300, 300)
# The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
manip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)

nn.setConfidenceThreshold(0.5)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# Linking
monoRight.out.link(manip.inputImage)
manip.out.link(nn.input)
manip.out.link(manipOut.input)
nn.out.link(nnOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the grayscale frames and nn data from the outputs defined above
    qRight = device.getOutputQueue("right", maxSize=4, blocking=False)
    qDet = device.getOutputQueue("nn", maxSize=4, blocking=False)

    frame = None
    detections = []

    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def displayFrame(name, frame):
        color = (255, 0, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        # Show the frame
        cv2.imshow(name, frame)

    while True:
        # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
        inRight = qRight.tryGet()
        inDet = qDet.tryGet()

        if inRight is not None:
            frame = inRight.getCvFrame()

        if inDet is not None:
            detections = inDet.detections

        if frame is not None:
            displayFrame("right", frame)

        if cv2.waitKey(1) == ord('q'):
            break
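
The frameNorm helper above scales the normalized <0..1> detection coordinates to pixel coordinates of the displayed frame: odd bbox indices (y values) are multiplied by the frame height, even indices (x values) by the frame width. A minimal, self-contained sketch of the same computation, using illustrative sample values only:
Python
import numpy as np

def frameNorm(frame, bbox):
    # frame.shape is (height, width, channels); y coordinates scale by height, x by width
    normVals = np.full(len(bbox), frame.shape[0])
    normVals[::2] = frame.shape[1]
    return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

frame = np.zeros((720, 1280, 3), dtype=np.uint8)   # a 720p right-camera frame
print(frameNorm(frame, (0.1, 0.2, 0.5, 0.6)))      # -> [128 144 640 432]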

Pipeline
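
The pipeline assembled by the code above links the nodes as follows (a simplified text sketch based on the Linking section of the source):

MonoCamera "right" (720p)
  └─> ImageManip (resize 300x300, convert to BGR888p)
        ├─> MobileNetDetectionNetwork ─> XLinkOut "nn"
        └─> XLinkOut "right"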
