Mono Preview¶
This example shows how to set up a pipeline that outputs the left and right grayscale camera images, connects to the host over XLink to transfer them in real time, and displays both using OpenCV.
Setup¶
Please run the install script to download all required dependencies. Note that this script must be run from inside the cloned repository, so download the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
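Alternatively, if you only want to run this example, the dependencies can be installed directly from PyPI (this skips the repository's pinned versions, so the install script above remains the recommended route):

python3 -m pip install depthai opencv-python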
For additional information, please follow the installation guide.
Source code¶
Python (also available on GitHub)
#!/usr/bin/env python3

import cv2
import depthai as dai

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
xoutLeft = pipeline.create(dai.node.XLinkOut)
xoutRight = pipeline.create(dai.node.XLinkOut)

xoutLeft.setStreamName('left')
xoutRight.setStreamName('right')

# Properties
monoLeft.setCamera("left")
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setCamera("right")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)

# Linking
monoRight.out.link(xoutRight.input)
monoLeft.out.link(xoutLeft.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the grayscale frames from the outputs defined above
    qLeft = device.getOutputQueue(name="left", maxSize=4, blocking=False)
    qRight = device.getOutputQueue(name="right", maxSize=4, blocking=False)

    while True:
        # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
        inLeft = qLeft.tryGet()
        inRight = qRight.tryGet()

        if inLeft is not None:
            cv2.imshow("left", inLeft.getCvFrame())

        if inRight is not None:
            cv2.imshow("right", inRight.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
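The two streams can also be combined into a single side-by-side window on the host. The sketch below is a minimal variation, assuming the same pipeline object as in the example above; it naively pairs the most recent frame from each queue rather than synchronizing by timestamp, which is sufficient for a preview.

import cv2
import numpy as np
import depthai as dai

# ... build the same pipeline as in the example above ...

with dai.Device(pipeline) as device:
    qLeft = device.getOutputQueue(name="left", maxSize=4, blocking=False)
    qRight = device.getOutputQueue(name="right", maxSize=4, blocking=False)

    frameLeft = None
    frameRight = None
    while True:
        inLeft = qLeft.tryGet()
        inRight = qRight.tryGet()
        if inLeft is not None:
            frameLeft = inLeft.getCvFrame()
        if inRight is not None:
            frameRight = inRight.getCvFrame()
        # Show the latest pair once both cameras have produced a frame
        if frameLeft is not None and frameRight is not None:
            cv2.imshow("left | right", np.hstack((frameLeft, frameRight)))
        if cv2.waitKey(1) == ord('q'):
            break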
C++ (also available on GitHub)
#include <iostream>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

int main() {
    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
    auto monoRight = pipeline.create<dai::node::MonoCamera>();
    auto xoutLeft = pipeline.create<dai::node::XLinkOut>();
    auto xoutRight = pipeline.create<dai::node::XLinkOut>();

    xoutLeft->setStreamName("left");
    xoutRight->setStreamName("right");

    // Properties
    monoLeft->setCamera("left");
    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
    monoRight->setCamera("right");
    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);

    // Linking
    monoRight->out.link(xoutRight->input);
    monoLeft->out.link(xoutLeft->input);

    // Connect to device and start pipeline
    dai::Device device(pipeline);

    // Output queues will be used to get the grayscale frames from the outputs defined above
    auto qLeft = device.getOutputQueue("left", 4, false);
    auto qRight = device.getOutputQueue("right", 4, false);

    while(true) {
        // Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or nullptr otherwise
        auto inLeft = qLeft->tryGet<dai::ImgFrame>();
        auto inRight = qRight->tryGet<dai::ImgFrame>();

        if(inLeft) {
            cv::imshow("left", inLeft->getCvFrame());
        }

        if(inRight) {
            cv::imshow("right", inRight->getCvFrame());
        }

        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') {
            return 0;
        }
    }
    return 0;
}
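If no frame should ever be skipped, the host-side loop can use the blocking get() call instead of tryGet(). Below is a minimal Python sketch, again assuming the pipeline built in the example above; note that a blocking queue will stall the loop if one camera stops producing frames.

import cv2
import depthai as dai

# ... build the same pipeline as in the example above ...

with dai.Device(pipeline) as device:
    # Blocking queues: get() waits until a frame is available
    qLeft = device.getOutputQueue(name="left", maxSize=4, blocking=True)
    qRight = device.getOutputQueue(name="right", maxSize=4, blocking=True)

    while True:
        cv2.imshow("left", qLeft.get().getCvFrame())
        cv2.imshow("right", qRight.get().getCvFrame())
        if cv2.waitKey(1) == ord('q'):
            break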