Integration with Roboflow
Note that we currently support only object detection tasks when deploying Roboflow models on OAK devices.
Installation
Install roboflowoak, depthai, and opencv-python:
Command Line
pip install roboflowoak
pip install depthai
pip install opencv-python
Deploying a Model from Roboflow
model: Replace this with the ID of your model in Roboflow.
version: Insert the specific version number of your model.
api_key: Use the private API key provided by Roboflow for your account.
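If you prefer not to hard-code the private API key in the script, one option is to read it from an environment variable and pass that value to RoboflowOak instead of the placeholder string. This is only a sketch; the variable name ROBOFLOW_API_KEY is an assumption, not something roboflowoak requires.
Python
import os

# assumed variable name; set it first, e.g. export ROBOFLOW_API_KEY=<your key>
api_key = os.environ.get("ROBOFLOW_API_KEY")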
Python
from roboflowoak import RoboflowOak
import cv2
import time
import numpy as np

if __name__ == '__main__':
    # instantiating an object (rf) with the RoboflowOak module
    rf = RoboflowOak(model="YOUR-MODEL-ID", confidence=0.05, overlap=0.5,
                     version="YOUR-MODEL-VERSION-#", api_key="YOUR-PRIVATE-API-KEY",
                     rgb=True, depth=True, device=None, blocking=True)
    # running the model and displaying the video output with detections
    while True:
        t0 = time.time()
        # the rf.detect() function runs the model inference
        result, frame, raw_frame, depth = rf.detect()
        predictions = result["predictions"]
        # result structure:
        # {
        #     predictions: [
        #         {
        #             x: (box center),
        #             y: (box center),
        #             width:,
        #             height:,
        #             depth:,
        #             confidence:,
        #             class:,
        #             mask:
        #         }
        #     ]
        # }
        # frame - frame after preprocessing, with predictions drawn on it
        # raw_frame - original frame from your OAK
        # depth - depth map for raw_frame, center-rectified to the center camera

        # timing: for benchmarking purposes
        t = time.time() - t0
        print("FPS ", 1/t)
        print("PREDICTIONS ", [p.json() for p in predictions])

        # normalizing and displaying the depth map
        # comment out the following two lines if you are using an OAK without depth
        max_depth = np.amax(depth)
        cv2.imshow("depth", depth/max_depth)
        # displaying the video feed as successive frames
        cv2.imshow("frame", frame)

        # to stop inference and close the window, press 'q' (or CTRL+C in the terminal)
        if cv2.waitKey(1) == ord('q'):
            break
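If you want to overlay the detections yourself, for example on raw_frame, you can use the per-prediction dictionaries returned by p.json(). The sketch below is a minimal example and not part of the roboflowoak API; it assumes the keys shown in the comment above (x and y are box centers) and that coordinates are in pixels.
Python
import cv2

def draw_predictions(image, predictions):
    # draw one labeled bounding box per prediction onto the image (in place)
    for p in predictions:
        d = p.json()  # assumed keys: x, y, width, height, class, confidence
        x, y = int(d["x"]), int(d["y"])
        w, h = int(d["width"]), int(d["height"])
        top_left = (x - w // 2, y - h // 2)
        bottom_right = (x + w // 2, y + h // 2)
        cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 2)
        label = "{} {:.2f}".format(d["class"], d["confidence"])
        cv2.putText(image, label, (top_left[0], top_left[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return image

Inside the loop above, you could then call cv2.imshow("raw", draw_predictions(raw_frame.copy(), predictions)) to view the boxes on the unprocessed frame.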