# Camera raw output

This example shows how to use the raw output from the Camera node. It also unpacks the RAW10 sensor data into a viewable OpenCV frame and shows it in a window.

This example requires the DepthAI v3 API, see [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/camera_raw.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 3, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "raw", "node1OutputGroup": "", "node2Id": 1, "node2Input": "in", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[3, {"alias": "", "id": 3, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 5, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_0"}}], [1, {"alias": "", "id": 1, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 4, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_0_raw"}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 100663297, "aeRegion": 
{"height": 0, "priority": 2501789088, "width": 0, "x": 4163, "y": 1}, "afRegion": {"height": 0, "priority": 0, "width": 0, "x": 23766, "y": 0}, "antiBandingMode": 30, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 232, "brightness": -86, "captureIntent": 149, "chromaDenoise": 0, "cmdMask": 0, "contrast": 43, "controlMode": 214, "effectMode": 92, "enableHdr": false, "expCompensation": -41, "expManual": {"exposureTimeUs": 23766, "frameDurationUs": 23766, "sensitivityIso": 2501789196}, "frameSyncMode": 0, "lensPosAutoInfinity": 32, "lensPosAutoMacro": 70, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 0, "lowPowerNumFramesDiscard": 0, "lumaDenoise": 0, "miscControls": [], "saturation": 25, "sceneMode": 69, "sharpness": 0, "strobeConfig": {"activeLevel": 72, "enable": 0, "gpioNumber": 76}, "strobeTimings": {"durationUs": 4, "exposureBeginOffsetUs": 23766, "exposureEndOffsetUs": 17}, "wbColorTemp": 0}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, "numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [4056, 3040]}}, "type": null}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```

## Source code

#### Python

```python
#!/usr/bin/env python3

import cv2
import depthai as dai
import numpy as np

def unpackRaw10(rawData, width, height, stride=None):
    """
    Unpacks RAW10 data from DepthAI pipeline into a 16-bit grayscale array.

    RAW10 packs 4 pixels into 5 bytes: the first four bytes hold the 8 most
    significant bits of each pixel, and the fifth byte carries the four
    remaining 2-bit least-significant pairs.

    :param rawData: Raw bytes from DepthAI (bytes or 1D numpy array)
    :param width: Image width in pixels
    :param height: Image height in pixels
    :param stride: Row stride in bytes (if None, calculated as width*10/8)
    :return: Unpacked 16-bit grayscale image with shape (height, width),
             scaled from the 10-bit range to the full 16-bit range
    :raises ValueError: if rawData holds fewer than stride*height bytes
    """
    if stride is None:
        stride = width * 10 // 8
    expectedSize = stride * height

    if len(rawData) < expectedSize:
        raise ValueError(f"Data too small: {len(rawData)} bytes, expected {expectedSize}")

    # Convert rawData to numpy array
    packedData = np.frombuffer(rawData, dtype=np.uint8)

    result = np.zeros((height, width), dtype=np.uint16)

    # How many complete 5-byte groups are needed to cover `width` pixels
    numGroups = (width + 3) // 4  # Ceiling division
    rowBytes = numGroups * 5

    # Only unpack when each row contains enough packed bytes; otherwise the
    # output stays all-zero (same outcome as the previous row-wise early exit).
    if rowBytes <= stride:
        # View the buffer as (height, stride) rows, then as 5-byte groups.
        rows = packedData[:expectedSize].reshape(height, stride)
        groups = rows[:, :rowBytes].reshape(height, numGroups, 5).astype(np.uint16)

        # 8 most significant bits of each of the 4 pixels, shifted into place
        highBits = groups[:, :, :4] << 2

        # Least significant 2 bits of each pixel live in the 5th group byte
        shifts = np.array([0, 2, 4, 6], dtype=np.uint16)
        lowBits = (groups[:, :, 4:5] >> shifts) & 0b11

        # Combine, flatten groups back into pixel rows, and trim to width
        unpacked = (highBits | lowBits).reshape(height, numGroups * 4)
        result[:, :] = unpacked[:, :width]

    # Scale from 10-bit (0-1023) to 16-bit (0-65535) for proper display
    result16bit = (result * 64).astype(np.uint16)
    return result16bit

# Build the pipeline: one Camera node with both a raw and a processed output
with dai.Pipeline() as pipeline:
    camera = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A)
    rawOut = camera.raw.createOutputQueue()
    videoOut = camera.requestFullResolutionOutput().createOutputQueue()

    # Connect to device and start pipeline
    pipeline.start()
    while pipeline.isRunning():
        videoFrame = videoOut.tryGet()
        rawMsg = rawOut.tryGet()

        if rawMsg is not None:
            assert isinstance(rawMsg, dai.ImgFrame)
            # Unpack the RAW10 payload into a displayable 16-bit image
            unpacked = unpackRaw10(
                rawMsg.getData(), rawMsg.getWidth(), rawMsg.getHeight(), rawMsg.getStride()
            )
            cv2.imshow("raw", unpacked)

        if videoFrame is not None:
            assert isinstance(videoFrame, dai.ImgFrame)
            cv2.imshow("video", videoFrame.getCvFrame())

        if cv2.waitKey(1) == ord("q"):
            break
```

#### C++

```cpp
#include <atomic>
#include <csignal>
#include <iostream>
#include <memory>
#include <opencv2/opencv.hpp>
#include <vector>

#include "depthai/depthai.hpp"

// Set by signalHandler when SIGINT/SIGTERM arrives; polled by the main loop.
std::atomic<bool> quitEvent{false};

// Request a clean shutdown of the streaming loop on the given signal.
void signalHandler(int) {
    quitEvent.store(true);
}

/**
 * Unpacks RAW10 data into a 16-bit grayscale cv::Mat.
 *
 * RAW10 packs 4 pixels into 5 bytes: the first four bytes hold the 8 most
 * significant bits of each pixel, the fifth byte carries the four remaining
 * 2-bit least-significant pairs.
 *
 * @param rawData Packed RAW10 bytes.
 * @param width   Image width in pixels.
 * @param height  Image height in pixels.
 * @param stride  Row stride in bytes; -1 computes it as width * 10 / 8.
 * @return CV_16UC1 image scaled from the 10-bit to the full 16-bit range.
 * @throws std::runtime_error if rawData is smaller than stride * height.
 */
cv::Mat unpackRaw10(const std::vector<uint8_t>& rawData, int width, int height, int stride = -1) {
    if(stride == -1) {
        stride = width * 10 / 8;
    }
    // size_t avoids the signed/unsigned comparison below and int overflow
    // on very large frames.
    const size_t expectedSize = static_cast<size_t>(stride) * static_cast<size_t>(height);

    if(rawData.size() < expectedSize) {
        throw std::runtime_error("Data too small: " + std::to_string(rawData.size()) + " bytes, expected " + std::to_string(expectedSize));
    }

    // Zero-initialize so rows skipped by the early break below stay black;
    // cv::Mat(h, w, type) alone leaves the buffer uninitialized.
    cv::Mat result = cv::Mat::zeros(height, width, CV_16UC1);

    // Process image row by row to handle stride correctly
    for(int row = 0; row < height; row++) {
        // Get row data using stride
        const uint8_t* rowStart = rawData.data() + static_cast<size_t>(row) * stride;

        // Calculate how many complete 5-byte groups we need for width pixels
        int numGroups = (width + 3) / 4;  // Ceiling division
        int rowBytes = numGroups * 5;

        // Ensure we don't read beyond the packed row
        if(rowBytes > stride) break;

        // Process each 5-byte group
        for(int i = 0; i < numGroups; i++) {
            const uint8_t* group = rowStart + i * 5;
            uint16_t pixels[4];

            // Extract 8 most significant bits
            pixels[0] = static_cast<uint16_t>(group[0] << 2);
            pixels[1] = static_cast<uint16_t>(group[1] << 2);
            pixels[2] = static_cast<uint16_t>(group[2] << 2);
            pixels[3] = static_cast<uint16_t>(group[3] << 2);

            // Extract least significant 2 bits from 5th byte
            pixels[0] |= (group[4] & 0b00000011);
            pixels[1] |= ((group[4] & 0b00001100) >> 2);
            pixels[2] |= ((group[4] & 0b00110000) >> 4);
            pixels[3] |= ((group[4] & 0b11000000) >> 6);

            // Copy pixels to result, clipping the last group to `width`
            for(int j = 0; j < 4 && (i * 4 + j) < width; j++) {
                result.at<uint16_t>(row, i * 4 + j) = pixels[j] * 64;  // Scale from 10-bit to 16-bit
            }
        }
    }

    return result;
}

int main() {
    signal(SIGTERM, signalHandler);
    signal(SIGINT, signalHandler);

    // Create device
    std::shared_ptr<dai::Device> device = std::make_shared<dai::Device>();

    // Create pipeline
    dai::Pipeline pipeline(device);

    // Create nodes
    auto cam = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
    auto rawQueue = cam->raw.createOutputQueue();
    auto videoQueue = cam->requestFullResolutionOutput()->createOutputQueue();

    // Start pipeline
    pipeline.start();

    while(pipeline.isRunning() && !quitEvent) {
        auto videoIn = videoQueue->tryGet<dai::ImgFrame>();
        auto rawFrame = rawQueue->tryGet<dai::ImgFrame>();

        if(rawFrame != nullptr) {
            auto dataRaw = rawFrame->getData();
            std::vector<uint8_t> dataRawVec(dataRaw.begin(), dataRaw.end());
            try {
                cv::Mat parsedImage = unpackRaw10(dataRawVec, rawFrame->getWidth(), rawFrame->getHeight(), rawFrame->getStride());
                cv::imshow("raw", parsedImage);
            } catch(const std::exception& e) {
                std::cerr << "Error processing raw frame: " << e.what() << std::endl;
            }
        }

        if(videoIn != nullptr) {
            cv::imshow("video", videoIn->getCvFrame());
        }

        if(cv::waitKey(1) == 'q') {
            break;
        }
    }

    pipeline.stop();
    pipeline.wait();

    return 0;
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
