# NN Archive Superblob

This example shows how to load a YOLOv6-Nano superblob model from the model zoo, configure a neural network node fed by a 416x416
color camera stream, and receive the raw detection and passthrough outputs.

This example requires the DepthAI v3 API, see [installation instructions](https://docs.luxonis.com/software-v3/depthai.md).

## Pipeline

### examples/nn_archive_superblob.pipeline.json

```json
{"pipeline": {"connections": [{"node1Id": 1, "node1Output": "passthrough", "node1OutputGroup": "", "node2Id": 4, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 1, "node1Output": "out", "node1OutputGroup": "", "node2Id": 2, "node2Input": "in", "node2InputGroup": ""}, {"node1Id": 0, "node1Output": "0", "node1OutputGroup": "dynamicOutputs", "node2Id": 1, "node2Input": "in", "node2InputGroup": ""}], "globalProperties": {"calibData": null, "cameraTuningBlobSize": null, "cameraTuningBlobUri": "", "leonCssFrequencyHz": 700000000.0, "leonMssFrequencyHz": 700000000.0, "pipelineName": null, "pipelineVersion": null, "sippBufferSize": 18432, "sippDmaBufferSize": 16384, "xlinkChunkSize": -1}, "nodes": [[4, {"alias": "", "id": 4, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 8, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_1_passthrough"}}], [2, {"alias": "", "id": 2, "ioInfo": [[["", "in"], {"blocking": true, "group": "", "id": 7, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "XLinkOut", "parentId": -1, "properties": {"maxFpsLimit": -1.0, "metadataOnly": false, "streamName": "__x_1_out"}}], [1, {"alias": "", "id": 1, "ioInfo": [[["", "passthrough"], {"blocking": false, "group": "", "id": 6, "name": "passthrough", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "out"], {"blocking": false, "group": "", "id": 5, "name": "out", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "in"], {"blocking": true, "group": "", "id": 4, "name": "in", "queueSize": 3, "type": 3, "waitForMessage": true}]], "logLevel": 3, "name": "NeuralNetwork", "parentId": -1, "properties": {"backend": "", "backendProperties": {}, "blobSize": 8691690, "blobUri": "asset:__blob", "modelSource": 0, "modelUri": "", "numFrames": 8, "numNCEPerThread": 0, "numShavesPerThread": 0, 
"numThreads": 2}}], [0, {"alias": "", "id": 0, "ioInfo": [[["dynamicOutputs", "0"], {"blocking": false, "group": "dynamicOutputs", "id": 3, "name": "0", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "raw"], {"blocking": false, "group": "", "id": 2, "name": "raw", "queueSize": 8, "type": 0, "waitForMessage": false}], [["", "mockIsp"], {"blocking": true, "group": "", "id": 1, "name": "mockIsp", "queueSize": 8, "type": 3, "waitForMessage": false}], [["", "inputControl"], {"blocking": true, "group": "", "id": 0, "name": "inputControl", "queueSize": 3, "type": 3, "waitForMessage": false}]], "logLevel": 3, "name": "Camera", "parentId": -1, "properties": {"boardSocket": 0, "cameraName": "", "fps": -1.0, "imageOrientation": -1, "initialControl": {"aeLockMode": false, "aeMaxExposureTimeUs": 1647981365, "aeRegion": {"height": 14390, "priority": 979723322, "width": 14948, "x": 24929, "y": 25146}, "afRegion": {"height": 26214, "priority": 979788602, "width": 14901, "x": 25953, "y": 13626}, "antiBandingMode": 58, "autoFocusMode": 3, "awbLockMode": false, "awbMode": 55, "brightness": 98, "captureIntent": 49, "chromaDenoise": 53, "cmdMask": 0, "contrast": 58, "controlMode": 52, "effectMode": 58, "enableHdr": false, "expCompensation": 48, "expManual": {"exposureTimeUs": 909796454, "frameDurationUs": 979644730, "sensitivityIso": 1681013345}, "frameSyncMode": 49, "lensPosAutoInfinity": 58, "lensPosAutoMacro": 102, "lensPosition": 0, "lensPositionRaw": 0.0, "lowPowerNumFramesBurst": 54, "lowPowerNumFramesDiscard": 58, "lumaDenoise": 58, "miscControls": [], "saturation": 51, "sceneMode": 51, "sharpness": 52, "strobeConfig": {"activeLevel": 58, "enable": 97, "gpioNumber": 98}, "strobeTimings": {"durationUs": 976434746, "exposureBeginOffsetUs": 825909560, "exposureEndOffsetUs": 925973042}, "wbColorTemp": 13626}, "isp3aFps": 0, "mockIspHeight": -1, "mockIspWidth": -1, "numFramesPoolIsp": 3, "numFramesPoolPreview": 4, "numFramesPoolRaw": 3, "numFramesPoolStill": 4, 
"numFramesPoolVideo": 4, "outputRequests": [{"enableUndistortion": null, "fps": {"value": null}, "resizeMode": 0, "size": {"value": {"index": 0, "value": [416, 416]}}, "type": 8}], "resolutionHeight": -1, "resolutionWidth": -1}}]]}}
```

## Source code

#### Python

```python
#!/usr/bin/env python3
# Load a YOLOv6-Nano superblob NNArchive from the model zoo, inspect its config,
# and run it on a 416x416 color camera stream.

import time
import depthai as dai

# We will download a superblob NNArchive from the model zoo.
# Pick your own model from the Luxonis model zoo (https://models.luxonis.com/).
modelDescription = dai.NNModelDescription()
modelDescription.model = "yolov6-nano"
modelDescription.platform = "RVC2"

# Download model from zoo (useCached avoids re-downloading) and load it
archivePath = dai.getModelFromZoo(modelDescription, useCached=True)
archive = dai.NNArchive(archivePath)

# Archive knows it is a superblob archive
assert archive.getModelType() == dai.ModelType.SUPERBLOB

# Therefore, getSuperBlob() is available
assert archive.getSuperBlob() is not None

# There is no blob or other model format available
assert archive.getBlob() is None
assert archive.getOtherModelFormat() is None

# You can access any config version
v1config: dai.nn_archive.v1.Config = archive.getConfig()

# Print some config fields
print("-" * 10)
print("Config fields:")
print(f"\tConfig version: {v1config.configVersion}")
print(f"\tModel heads: {v1config.model.heads}")
print(f"\tModel inputs: {v1config.model.inputs}")
print(f"\tModel metadata: {v1config.model.metadata}")
print(f"\tModel outputs: {v1config.model.outputs}")
print("-" * 10)

with dai.Pipeline() as pipeline:
    # Color camera node producing 416x416 BGR planar frames (the NN input size)
    camRgb = pipeline.create(dai.node.Camera).build()
    outCam = camRgb.requestOutput((416, 416), dai.ImgFrame.Type.BGR888p)

    # Neural network node; a superblob lets us choose the SHAVE-core count at load time
    blob = archive.getSuperBlob().getBlobWithNumShaves(6)
    neuralNetwork = pipeline.create(dai.node.NeuralNetwork)
    neuralNetwork.setBlob(blob)
    neuralNetwork.setNumInferenceThreads(2)

    # Linking: camera frames feed the NN input
    outCam.link(neuralNetwork.input)

    # Queues for the raw NN output and the passthrough of the input frame
    nnDetectionQueue = neuralNetwork.out.createOutputQueue()
    nnPassthroughQueue = neuralNetwork.passthrough.createOutputQueue()

    pipeline.start()

    while pipeline.isRunning():
        # Blocking reads: one NNData result and its matching passthrough frame
        in_nn = nnDetectionQueue.get()
        in_nnPassthrough = nnPassthroughQueue.get()
        print("Data received")
        time.sleep(0.1)
```

#### C++

```cpp
#include <atomic>
#include <chrono>
#include <csignal>
#include <iostream>
#include <memory>
#include <thread>

#include "depthai/depthai.hpp"
#include "depthai/openvino/OpenVINO.hpp"

// Set to true from the signal handler; polled by the main loop to exit cleanly.
std::atomic<bool> quitEvent(false);

// Signal handler: request a graceful shutdown on SIGINT/SIGTERM.
void signalHandler(int signum) {
    (void)signum;  // same handler for every registered signal
    quitEvent.store(true);
}

int main() {
    // Install handlers so SIGINT/SIGTERM (e.g. Ctrl+C) request a clean shutdown
    // of the polling loop instead of killing the process mid-pipeline.
    // <csignal> only guarantees the name in namespace std, so qualify it.
    std::signal(SIGTERM, signalHandler);
    std::signal(SIGINT, signalHandler);

    try {
        // Describe the model to fetch from the model zoo
        dai::NNModelDescription modelDesc;
        modelDesc.model = "yolov6-nano";
        modelDesc.platform = "RVC2";
        auto archivePath = dai::getModelFromZoo(modelDesc, true);  // true to use cached if available, otherwise re-download

        // Load NN archive
        dai::NNArchive archive(archivePath);

        // Verify archive type and properties: a superblob archive exposes
        // getSuperBlob() and has no plain blob.
        if(archive.getModelType() != dai::model::ModelType::SUPERBLOB) {
            throw std::runtime_error("Archive is not a superblob type");
        }

        if(!archive.getSuperBlob()) {
            throw std::runtime_error("SuperBlob should not be null for superblob type");
        }

        if(archive.getBlob()) {
            throw std::runtime_error("Blob should be null for superblob type");
        }

        // Get config and print some fields. configVersion and heads are optional
        // fields, so guard the access instead of calling .value(), which throws
        // std::bad_optional_access when the field is absent.
        auto config = archive.getConfig<dai::nn_archive::v1::Config>();
        std::cout << "----------" << std::endl;
        std::cout << "Config fields:" << std::endl;
        std::cout << "\tConfig version: " << config.configVersion.value_or("unknown") << std::endl;
        std::cout << "\tModel heads: " << (config.model.heads ? config.model.heads->size() : 0u) << std::endl;
        std::cout << "\tModel inputs: " << config.model.inputs.size() << std::endl;
        std::cout << "\tModel outputs: " << config.model.outputs.size() << std::endl;
        std::cout << "----------" << std::endl;

        // Create pipeline
        dai::Pipeline pipeline;

        // Color camera node producing 416x416 BGR planar frames (the NN input size)
        auto camRgb = pipeline.create<dai::node::Camera>()->build();
        auto camOut = camRgb->requestOutput(std::make_pair(416, 416), dai::ImgFrame::Type::BGR888p);

        // Neural network node; the superblob lets us pick the SHAVE-core count at load time
        auto neuralNetwork = pipeline.create<dai::node::NeuralNetwork>();
        neuralNetwork->setBlob(archive.getSuperBlob()->getBlobWithNumShaves(6));
        neuralNetwork->setNumInferenceThreads(2);

        // Linking: camera frames feed the NN input
        camOut->link(neuralNetwork->input);

        // Queues for the raw NN output and the passthrough of the input frame
        auto qDet = neuralNetwork->out.createOutputQueue();
        auto qPassthrough = neuralNetwork->passthrough.createOutputQueue();

        // Start pipeline
        pipeline.start();

        // Poll both queues until the pipeline stops or a shutdown signal arrives
        while(pipeline.isRunning() && !quitEvent) {
            auto inDet = qDet->get<dai::NNData>();
            auto inPassthrough = qPassthrough->get<dai::ImgFrame>();

            if(inDet != nullptr) {
                std::cout << "Detection data received" << std::endl;
            }

            if(inPassthrough != nullptr) {
                std::cout << "Passthrough frame received" << std::endl;
            }

            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }

        // Cleanup: stop the pipeline and wait for device-side teardown
        pipeline.stop();
        pipeline.wait();

    } catch(const std::exception& e) {
        std::cerr << "Error: " << e.what() << std::endl;
        return 1;
    }

    return 0;
}
```

### Need assistance?

Head over to [Discussion Forum](https://discuss.luxonis.com/) for technical support or any other questions you might have.
