Feature Tracker

This example showcases the FeatureTracker node. It detects features and tracks them between consecutive frames using optical flow, assigning a unique ID to matching features.
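For orientation, here is a minimal sketch of the data flow, condensed from the full listing below (the full example additionally visualizes the feature trails and reconfigures the tracker at runtime):

import depthai as dai

with dai.Pipeline() as pipeline:
    # Camera frames are converted to GRAY8 before feature tracking
    camera = pipeline.create(dai.node.Camera).build()
    manip = pipeline.create(dai.node.ImageManip)
    manip.initialConfig.setFrameType(dai.ImgFrame.Type.GRAY8)
    camera.requestOutput((640, 640), dai.ImgFrame.Type.NV12).link(manip.inputImage)

    featureTracker = pipeline.create(dai.node.FeatureTracker)
    manip.out.link(featureTracker.inputImage)
    featuresQueue = featureTracker.outputFeatures.createOutputQueue()

    pipeline.start()
    while pipeline.isRunning():
        # Each tracked feature carries a persistent id and a sub-pixel position
        for f in featuresQueue.get().trackedFeatures:
            print(f.id, f.position.x, f.position.y)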

Demo

This example requires the DepthAI v3 API; see the installation instructions.

Pipeline
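In this example, the Camera output is converted to GRAY8 by an ImageManip node and fed to the FeatureTracker node. The host reads tracked features and the passthrough camera frames from output queues, and sends updated FeatureTrackerConfig messages back to the tracker through its config input queue.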

Source code

Python
import cv2
from collections import deque
import depthai as dai

class FeatureTrackerDrawer:
    lineColor = (200, 0, 200)
    pointColor = (0, 0, 255)
    circleRadius = 2
    maxTrackedFeaturesPathLength = 30
    trackedFeaturesPathLength = 10

    trackedIDs = None
    trackedFeaturesPath = None

    def onTrackBar(self, val):
        FeatureTrackerDrawer.trackedFeaturesPathLength = val

    def trackFeaturePath(self, features):
        newTrackedIDs = set()
        for currentFeature in features:
            currentID = currentFeature.id
            newTrackedIDs.add(currentID)

            if currentID not in self.trackedFeaturesPath:
                self.trackedFeaturesPath[currentID] = deque()

            path = self.trackedFeaturesPath[currentID]

            # Append the latest position and trim the trail to the configured length
            path.append(currentFeature.position)
            while len(path) > max(1, FeatureTrackerDrawer.trackedFeaturesPathLength):
                path.popleft()

            self.trackedFeaturesPath[currentID] = path

        # Drop trails of features that are no longer tracked
        featuresToRemove = set()
        for oldId in self.trackedIDs:
            if oldId not in newTrackedIDs:
                featuresToRemove.add(oldId)

        for id in featuresToRemove:
            self.trackedFeaturesPath.pop(id)

        self.trackedIDs = newTrackedIDs

    def drawFeatures(self, img):
        cv2.setTrackbarPos(self.trackbarName, self.windowName, FeatureTrackerDrawer.trackedFeaturesPathLength)

        # Draw each feature's trail as a polyline and its current position as a dot
        for path in self.trackedFeaturesPath.values():
            for j in range(len(path) - 1):
                src = (int(path[j].x), int(path[j].y))
                dst = (int(path[j + 1].x), int(path[j + 1].y))
                cv2.line(img, src, dst, self.lineColor, 1, cv2.LINE_AA, 0)
            j = len(path) - 1
            cv2.circle(img, (int(path[j].x), int(path[j].y)), self.circleRadius, self.pointColor, -1, cv2.LINE_AA, 0)

    def __init__(self, trackbarName, windowName):
        self.trackbarName = trackbarName
        self.windowName = windowName
        cv2.namedWindow(windowName)
        cv2.createTrackbar(trackbarName, windowName, FeatureTrackerDrawer.trackedFeaturesPathLength, FeatureTrackerDrawer.maxTrackedFeaturesPathLength, self.onTrackBar)
        self.trackedIDs = set()
        self.trackedFeaturesPath = dict()

print("Press 'm' to enable/disable motion estimation!")

inputConfigQueue = None
def on_trackbar(val):
    try:
        cfg = dai.FeatureTrackerConfig()
        cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
        cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')
        cornerDetector.numTargetFeatures = cornerDetector.numMaxFeatures

        thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
        thresholds.initialValue = cv2.getTrackbarPos('harrisScore', 'Features')
        cornerDetector.thresholds = thresholds
    except cv2.error:
        # Trackbars are not created yet; skip this update
        return

    cfg.setCornerDetector(cornerDetector)
    if inputConfigQueue:
        inputConfigQueue.send(cfg)

cv2.namedWindow('Features', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Features', 1080, 800)

cv2.createTrackbar('harrisScore', 'Features', 20000, 25000, on_trackbar)
cv2.createTrackbar('numMaxFeatures', 'Features', 256, 1024, on_trackbar)

# Create pipeline
with dai.Pipeline() as pipeline:
    camera = pipeline.create(dai.node.Camera).build()
    camOutput = camera.requestOutput((640, 640), dai.ImgFrame.Type.NV12)
    manip = pipeline.create(dai.node.ImageManip)
    manip.initialConfig.setFrameType(dai.ImgFrame.Type.GRAY8)
    camOutput.link(manip.inputImage)

    featureTracker = pipeline.create(dai.node.FeatureTracker)

    featureTracker.initialConfig.setCornerDetector(dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS)
    featureTracker.initialConfig.setMotionEstimator(False)
    featureTracker.initialConfig.setNumTargetFeatures(256)

    motionEstimator = dai.FeatureTrackerConfig.MotionEstimator()
    motionEstimator.enable = True
    featureTracker.initialConfig.setMotionEstimator(motionEstimator)

    cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
    cornerDetector.numMaxFeatures = 256
    cornerDetector.numTargetFeatures = cornerDetector.numMaxFeatures

    # RVC2-specific setting to allow for more features
    featureTracker.setHardwareResources(2, 2)

    outputFeaturePassthroughQueue = camOutput.createOutputQueue()
    outputFeatureQueue = featureTracker.outputFeatures.createOutputQueue()

    manip.out.link(featureTracker.inputImage)

    inputConfigQueue = featureTracker.inputConfig.createInputQueue()

    thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
    thresholds.initialValue = cv2.getTrackbarPos('harrisScore', 'Features')

    cornerDetector.thresholds = thresholds
    featureTracker.initialConfig.setCornerDetector(cornerDetector)

    leftWindowName = "Features"
    leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)

    pipeline.start()
    while pipeline.isRunning():
        outputPassthroughImage: dai.ImgFrame = outputFeaturePassthroughQueue.get()

        passthroughImage = outputPassthroughImage.getCvFrame()
        trackedFeaturesLeft = outputFeatureQueue.get().trackedFeatures

        leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft)
        leftFeatureDrawer.drawFeatures(passthroughImage)

        # Show the frame
        cv2.imshow(leftWindowName, passthroughImage)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('m'):
            # Rebuild the config from the current trackbar values and toggle the motion estimator
            cfg = dai.FeatureTrackerConfig()
            cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
            cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')
            cornerDetector.numTargetFeatures = cornerDetector.numMaxFeatures

            thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
            thresholds.initialValue = cv2.getTrackbarPos('harrisScore', 'Features')
            cornerDetector.thresholds = thresholds

            cfg.setCornerDetector(cornerDetector)

            motionEstimator.enable = not motionEstimator.enable
            cfg.setMotionEstimator(motionEstimator)
            print("Enabling motionEstimator" if motionEstimator.enable else "Disabling motionEstimator")

            inputConfigQueue.send(cfg)
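While the example runs, the harrisScore and numMaxFeatures trackbars send an updated FeatureTrackerConfig to the device through the tracker's config input queue, the "Feature tracking duration (frames)" trackbar controls how long the drawn trails are, 'm' toggles the motion estimator, and 'q' quits.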
