Software Stack
DepthAI

ON THIS PAGE

  • Camera raw output
  • Pipeline
  • Source code

Camera raw output

Supported on: RVC2, RVC4
This example shows how to use the raw output from the Camera node. It also unpacks RAW10 (sensor) data into a viewable OpenCV frame and shows it in a window. This example requires the DepthAI v3 API; see installation instructions.

Pipeline

Source code

Python
C++

Python

Python
GitHub
1#!/usr/bin/env python3
2
3import cv2
4import depthai as dai
5import numpy as np
6
def unpackRaw10(rawData, width, height, stride=None):
    """
    Unpacks RAW10 data from the DepthAI pipeline into a 16-bit grayscale array.

    :param rawData: Packed raw bytes from DepthAI (bytes or 1D numpy array)
    :param width: Image width in pixels
    :param height: Image height in pixels
    :param stride: Row stride in bytes (if None, calculated as width * 10 / 8)
    :return: Unpacked 16-bit grayscale image with shape (height, width)
    :raises ValueError: If rawData holds fewer than stride * height bytes
    """
    if stride is None:
        stride = width * 10 // 8
    expectedSize = stride * height

    if len(rawData) < expectedSize:
        raise ValueError(f"Data too small: {len(rawData)} bytes, expected {expectedSize}")

    # Convert raw data to a numpy byte array
    packedData = np.frombuffer(rawData, dtype=np.uint8)

    # Process the image row by row to handle stride (row padding) correctly
    result = np.zeros((height, width), dtype=np.uint16)

    # Each 5-byte group packs four 10-bit pixels; ceiling division covers
    # widths that are not a multiple of 4. Loop-invariant, so computed once.
    numGroups = (width + 3) // 4
    rowBytes = numGroups * 5

    for row in range(height):
        # Get this row's packed bytes using the stride
        rowStart = row * stride
        rowData = packedData[rowStart:rowStart + stride]
        # Stop (leaving any remaining rows zeroed) if the row is truncated
        if len(rowData) < rowBytes:
            break

        # Process only the bytes needed for this row, one 5-byte group per row
        rowPacked = rowData[:rowBytes].reshape(-1, 5)

        # The first four bytes hold the 8 most significant bits of each pixel
        rowUnpacked = rowPacked[:, :4].astype(np.uint16) << 2

        # The fifth byte holds the 2 least significant bits of all four pixels
        rowUnpacked[:, 0] |= (rowPacked[:, 4] & 0b00000011)
        rowUnpacked[:, 1] |= (rowPacked[:, 4] & 0b00001100) >> 2
        rowUnpacked[:, 2] |= (rowPacked[:, 4] & 0b00110000) >> 4
        rowUnpacked[:, 3] |= (rowPacked[:, 4] & 0b11000000) >> 6

        # Flatten and keep only the required width pixels
        # (drops padding pixels from the last, possibly partial, group)
        result[row, :width] = rowUnpacked.flatten()[:width]

    # Scale from 10-bit (0-1023) to 16-bit (0-65535) range for proper display
    result16bit = (result * 64).astype(np.uint16)
    return result16bit
63
# Build the pipeline: stream both the raw sensor output and a processed
# full-resolution preview from the same camera socket.
with dai.Pipeline() as pipeline:
    camera = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
    rawOutputQueue = camera.raw.createOutputQueue()
    videoOutputQueue = camera.requestFullResolutionOutput().createOutputQueue()

    # Connect to the device and start the pipeline
    pipeline.start()
    while pipeline.isRunning():
        videoFrame = videoOutputQueue.tryGet()
        rawMessage = rawOutputQueue.tryGet()

        if rawMessage is not None:
            assert isinstance(rawMessage, dai.ImgFrame)
            # Unpack the RAW10 payload into a displayable 16-bit frame
            unpacked = unpackRaw10(
                rawMessage.getData(),
                rawMessage.getWidth(),
                rawMessage.getHeight(),
                rawMessage.getStride(),
            )
            cv2.imshow("raw", unpacked)

        if videoFrame is not None:
            assert isinstance(videoFrame, dai.ImgFrame)
            cv2.imshow("video", videoFrame.getCvFrame())

        if cv2.waitKey(1) == ord("q"):
            break

Need assistance?

Head over to Discussion Forum for technical support or any other questions you might have.