DepthAI
Software Stack

ON THIS PAGE

  • Pipeline
  • Source code

Camera raw output

Supported on: RVC2, RVC4
This example shows how to use the raw output from the Camera node. It also unpacks RAW10 (sensor) data into a viewable OpenCV frame and shows it in a window. This example requires the DepthAI v3 API, see installation instructions.

Pipeline

Source code

Python

Python
GitHub
1#!/usr/bin/env python3
2
3import cv2
4import depthai as dai
5import numpy as np
6
def unpackRaw10(rawData, width, height, stride=None):
    """
    Unpack packed RAW10 pixel data into a 16-bit grayscale image.

    RAW10 packs 4 pixels into every 5 bytes: bytes 0-3 carry the 8 most
    significant bits of each pixel, byte 4 carries the 2 least significant
    bits of all four pixels.

    :param rawData: Packed raw bytes from DepthAI (bytes-like or 1D numpy array)
    :param width: Image width in pixels
    :param height: Image height in pixels
    :param stride: Row stride in bytes (if None, calculated as width*10//8)
    :return: Unpacked uint16 image of shape (height, width), scaled from the
             10-bit range (0-1023) to the 16-bit range for proper display
    :raises ValueError: if rawData holds fewer than stride*height bytes
    """
    if stride is None:
        stride = width * 10 // 8
    expectedSize = stride * height

    if len(rawData) < expectedSize:
        raise ValueError(f"Data too small: {len(rawData)} bytes, expected {expectedSize}")

    # Convert the raw bytes to a numpy array
    packedData = np.frombuffer(rawData, dtype=np.uint8)

    result = np.zeros((height, width), dtype=np.uint16)

    # How many 5-byte groups are needed to cover `width` pixels.
    numGroups = (width + 3) // 4  # ceiling division
    rowBytes = numGroups * 5

    # Process image row by row to handle stride correctly
    for row in range(height):
        rowStart = row * stride
        rowData = packedData[rowStart:rowStart + stride]

        # Pad short rows with zeros (e.g. width not a multiple of 4 with a
        # tight stride) so partial trailing groups still decode instead of
        # the whole row being silently dropped.
        if len(rowData) < rowBytes:
            rowData = np.concatenate(
                [rowData, np.zeros(rowBytes - len(rowData), dtype=np.uint8)])

        rowPacked = rowData[:rowBytes].reshape(-1, 5).astype(np.uint16)
        rowUnpacked = np.zeros((numGroups, 4), dtype=np.uint16)

        # 8 most significant bits of each pixel.
        rowUnpacked[:, :4] = rowPacked[:, :4] << 2

        # 2 least significant bits of each pixel, packed into the 5th byte.
        fifth = rowPacked[:, 4]
        rowUnpacked[:, 0] |= fifth & 0b00000011
        rowUnpacked[:, 1] |= (fifth & 0b00001100) >> 2
        rowUnpacked[:, 2] |= (fifth & 0b00110000) >> 4
        rowUnpacked[:, 3] |= (fifth & 0b11000000) >> 6

        # Keep only the first `width` pixels of the decoded groups.
        result[row, :] = rowUnpacked.reshape(-1)[:width]

    # Scale from 10-bit (0-1023) to 16-bit (0-65535): 1023 * 64 = 65472.
    return (result * 64).astype(np.uint16)
63
# Build the pipeline: one Camera node exposing both its raw output and a
# full-resolution (processed) output, each drained through an output queue.
with dai.Pipeline() as pipeline:
    camera = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
    rawOut = camera.raw.createOutputQueue()
    videoOut = camera.requestFullResolutionOutput().createOutputQueue()

    # Connect to the device and start streaming.
    pipeline.start()
    while pipeline.isRunning():
        rawMsg = rawOut.tryGet()
        videoMsg = videoOut.tryGet()

        if rawMsg is not None:
            assert isinstance(rawMsg, dai.ImgFrame)
            packed = rawMsg.getData()
            # Decode the packed RAW10 payload into a displayable 16-bit frame.
            frame16 = unpackRaw10(packed, rawMsg.getWidth(), rawMsg.getHeight(), rawMsg.getStride())
            cv2.imshow("raw", frame16)

        if videoMsg is not None:
            assert isinstance(videoMsg, dai.ImgFrame)
            cv2.imshow("video", videoMsg.getCvFrame())

        if cv2.waitKey(1) == ord("q"):
            break

C++

1#include <iostream>
2#include <memory>
3#include <opencv2/opencv.hpp>
4#include <vector>
5
6#include "depthai/depthai.hpp"
7
8cv::Mat unpackRaw10(const std::vector<uint8_t>& rawData, int width, int height, int stride = -1) {
9    if(stride == -1) {
10        stride = width * 10 / 8;
11    }
12    int expectedSize = stride * height;
13
14    if(rawData.size() < expectedSize) {
15        throw std::runtime_error("Data too small: " + std::to_string(rawData.size()) + " bytes, expected " + std::to_string(expectedSize));
16    }
17
18    // Create output matrix
19    cv::Mat result(height, width, CV_16UC1);
20
21    // Process image row by row to handle stride correctly
22    for(int row = 0; row < height; row++) {
23        // Get row data using stride
24        const uint8_t* rowStart = rawData.data() + row * stride;
25
26        // Calculate how many complete 5-byte groups we need for width pixels
27        int numGroups = (width + 3) / 4;  // Ceiling division
28        int rowBytes = numGroups * 5;
29
30        // Ensure we don't go beyond available data
31        if(rowBytes > stride) break;
32
33        // Process each 5-byte group
34        for(int i = 0; i < numGroups; i++) {
35            const uint8_t* group = rowStart + i * 5;
36            uint16_t pixels[4];
37
38            // Extract 8 most significant bits
39            pixels[0] = (group[0] << 2);
40            pixels[1] = (group[1] << 2);
41            pixels[2] = (group[2] << 2);
42            pixels[3] = (group[3] << 2);
43
44            // Extract least significant 2 bits from 5th byte
45            pixels[0] |= (group[4] & 0b00000011);
46            pixels[1] |= ((group[4] & 0b00001100) >> 2);
47            pixels[2] |= ((group[4] & 0b00110000) >> 4);
48            pixels[3] |= ((group[4] & 0b11000000) >> 6);
49
50            // Copy pixels to result
51            for(int j = 0; j < 4 && (i * 4 + j) < width; j++) {
52                result.at<uint16_t>(row, i * 4 + j) = pixels[j] * 64;  // Scale from 10-bit to 16-bit
53            }
54        }
55    }
56
57    return result;
58}
59
60int main() {
61    // Create device
62    std::shared_ptr<dai::Device> device = std::make_shared<dai::Device>();
63
64    // Create pipeline
65    dai::Pipeline pipeline(device);
66
67    // Create nodes
68    auto cam = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
69    auto rawQueue = cam->raw.createOutputQueue();
70    auto videoQueue = cam->requestFullResolutionOutput()->createOutputQueue();
71
72    // Start pipeline
73    pipeline.start();
74
75    while(true) {
76        auto videoIn = videoQueue->tryGet<dai::ImgFrame>();
77        auto rawFrame = rawQueue->tryGet<dai::ImgFrame>();
78
79        if(rawFrame != nullptr) {
80            auto dataRaw = rawFrame->getData();
81            std::vector<uint8_t> dataRawVec(dataRaw.begin(), dataRaw.end());
82            try {
83                cv::Mat parsedImage = unpackRaw10(dataRawVec, rawFrame->getWidth(), rawFrame->getHeight(), rawFrame->getStride());
84                cv::imshow("raw", parsedImage);
85            } catch(const std::exception& e) {
86                std::cerr << "Error processing raw frame: " << e.what() << std::endl;
87            }
88        }
89
90        if(videoIn != nullptr) {
91            cv::imshow("video", videoIn->getCvFrame());
92        }
93
94        if(cv::waitKey(1) == 'q') {
95            break;
96        }
97    }
98
99    return 0;
100}

Need assistance?

Head over to the Discussion Forum for technical support or any other questions you might have.