Spatial object tracker on RGB

This example shows how to run MobileNetv2SSD on the RGB input frame and perform spatial object tracking on persons. The detection network's setConfidenceThreshold call sets the confidence threshold above which objects are detected.
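As a quick, illustrative sketch (using the same depthai API as the full source below), the threshold is configured on the detection network node like this; the 0.5 value mirrors the full example and can be tuned:

Python
import depthai as dai

pipeline = dai.Pipeline()
spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
# Detections below this confidence are discarded before they reach the tracker
spatialDetectionNetwork.setConfidenceThreshold(0.5)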

Setup

Please run the install script to download all required dependencies. Note that the script must be run from inside the cloned repository, so first clone the depthai-python repository and then run the script:
Command Line
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.

Source code

Python
#!/usr/bin/env python3

from pathlib import Path
import cv2
import depthai as dai
import time
import argparse

labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

nnPathDefault = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_5shave.blob')).resolve().absolute())
parser = argparse.ArgumentParser()
parser.add_argument('nnPath', nargs='?', help="Path to mobilenet detection network blob", default=nnPathDefault)
parser.add_argument('-ff', '--full_frame', action="store_true", help="Perform tracking on full RGB frame", default=False)

args = parser.parse_args()

fullFrameTracking = args.full_frame

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
objectTracker = pipeline.create(dai.node.ObjectTracker)

xoutRgb = pipeline.create(dai.node.XLinkOut)
trackerOut = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("preview")
trackerOut.setStreamName("tracklets")

# Properties
camRgb.setPreviewSize(300, 300)  # MobileNet-SSD expects a 300x300 input
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")

# Setting node configs
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Align depth map to the perspective of the RGB camera, on which inference is done
stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A)
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())

spatialDetectionNetwork.setBlobPath(args.nnPath)
spatialDetectionNetwork.setConfidenceThreshold(0.5)
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)   # mm
spatialDetectionNetwork.setDepthUpperThreshold(5000)  # mm

objectTracker.setDetectionLabelsToTrack([15])  # track only persons (label 15 in labelMap)
# Possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF
objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
# Take the smallest available ID when a new object is tracked; possible options: SMALLEST_ID, UNIQUE_ID
objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.SMALLEST_ID)

# Linking
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)

camRgb.preview.link(spatialDetectionNetwork.input)
objectTracker.passthroughTrackerFrame.link(xoutRgb.input)
objectTracker.out.link(trackerOut.input)

if fullFrameTracking:
    camRgb.setPreviewKeepAspectRatio(False)
    camRgb.video.link(objectTracker.inputTrackerFrame)
    objectTracker.inputTrackerFrame.setBlocking(False)
    # Do not block the pipeline if it's too slow on full frame
    objectTracker.inputTrackerFrame.setQueueSize(2)
else:
    spatialDetectionNetwork.passthrough.link(objectTracker.inputTrackerFrame)

spatialDetectionNetwork.passthrough.link(objectTracker.inputDetectionFrame)
spatialDetectionNetwork.out.link(objectTracker.inputDetections)
stereo.depth.link(spatialDetectionNetwork.inputDepth)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    preview = device.getOutputQueue("preview", 4, False)
    tracklets = device.getOutputQueue("tracklets", 4, False)

    startTime = time.monotonic()
    counter = 0
    fps = 0
    color = (255, 255, 255)

    while True:
        imgFrame = preview.get()
        track = tracklets.get()

        # Update the FPS estimate once per second
        counter += 1
        current_time = time.monotonic()
        if (current_time - startTime) > 1:
            fps = counter / (current_time - startTime)
            counter = 0
            startTime = current_time

        frame = imgFrame.getCvFrame()
        trackletsData = track.tracklets
        for t in trackletsData:
            # Tracklet ROI is normalized; scale it to pixel coordinates
            roi = t.roi.denormalize(frame.shape[1], frame.shape[0])
            x1 = int(roi.topLeft().x)
            y1 = int(roi.topLeft().y)
            x2 = int(roi.bottomRight().x)
            y2 = int(roi.bottomRight().y)

            try:
                label = labelMap[t.label]
            except IndexError:
                label = t.label

            cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"ID: {t.id}", (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, t.status.name, (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)

            # Spatial coordinates are in millimeters, relative to the depth-aligned RGB camera
            cv2.putText(frame, f"X: {int(t.spatialCoordinates.x)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"Y: {int(t.spatialCoordinates.y)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"Z: {int(t.spatialCoordinates.z)} mm", (x1 + 10, y1 + 95), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)

        cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)

        cv2.imshow("tracker", frame)

        if cv2.waitKey(1) == ord('q'):
            break
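To try the example, run the script, optionally with the -ff flag defined above to track on the full RGB frame instead of the 300x300 preview. The filename below is illustrative; in the depthai-python repository the example lives under examples/:

Command Line
python3 spatial_object_tracker.py       # track on the 300x300 preview (default)
python3 spatial_object_tracker.py -ff   # track on the full RGB frame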

Pipeline

The nodes are linked as follows:

  • ColorCamera preview → MobileNetSpatialDetectionNetwork input (300x300 BGR frames)
  • MonoCamera left/right → StereoDepth; StereoDepth depth → MobileNetSpatialDetectionNetwork inputDepth
  • MobileNetSpatialDetectionNetwork out → ObjectTracker inputDetections; its passthrough feeds ObjectTracker inputDetectionFrame (and inputTrackerFrame, unless full-frame tracking is enabled, in which case ColorCamera video is used instead)
  • ObjectTracker out → XLinkOut "tracklets"; ObjectTracker passthroughTrackerFrame → XLinkOut "preview"

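The ObjectTracker settings used above are one of several valid combinations. As a sketch, switching to the short-term KCF tracker with unique ID assignment (both values are among the options listed in the source comments) would look like this:

Python
# Alternative tracker configuration; enum values taken from the options
# listed in the source comments above
objectTracker.setTrackerType(dai.TrackerType.SHORT_TERM_KCF)
objectTracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID)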