Skip to content

Commit

Permalink
Add qnn_quantized_test
Browse files Browse the repository at this point in the history
  • Loading branch information
ciaranbor committed Jul 28, 2024
1 parent 18b592c commit d833113
Show file tree
Hide file tree
Showing 2 changed files with 67 additions and 1 deletion.
2 changes: 1 addition & 1 deletion test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ endif()

if(edgerunner_ENABLE_NPU)
list(APPEND TEST_SOURCES source/qnn_shared_library_npu_test.cpp
source/qnn_context_binary_npu_test.cpp
source/qnn_context_binary_npu_test.cpp source/qnn_quantized_test.cpp
)
endif()

Expand Down
66 changes: 66 additions & 0 deletions test/source/qnn_quantized_test.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

#include <catch2/catch_test_macros.hpp>

#include "edgerunner/edgerunner.hpp"
#include "edgerunner/model.hpp"
#include "edgerunner/tensor.hpp"
#include "utils.hpp"

TEST_CASE("QNN runtime quantized (NPU)", "[qnn][npu][quantized]") {
    /* Quantized MobileNetV3-Large packaged as a QNN shared-library model. */
    const std::string modelPath = "models/qnn/mobilenet_v3_large_quantized.so";

    auto model = edge::createModel(modelPath);
    REQUIRE(model != nullptr);
    REQUIRE(std::string {"mobilenet_v3_large_quantized"} == model->name());

    /* FIX: this test is named "(NPU)", tagged [npu], and built only when
     * edgerunner_ENABLE_NPU is set, but it previously applied the CPU
     * delegate. Apply the NPU delegate so the quantized NPU path is
     * actually exercised, matching the sibling qnn_*_npu tests. */
    model->applyDelegate(edge::DELEGATE::NPU);
    REQUIRE(model->getDelegate() == edge::DELEGATE::NPU);

    /* a quantized model should report an 8-bit unsigned precision */
    REQUIRE(model->getPrecision() == edge::TensorType::UINT8);

    /* input/output counts must agree with the tensor collections */
    const auto inputs = model->getInputs();
    const auto numInputs = model->getNumInputs();
    REQUIRE(numInputs == 1);
    REQUIRE(numInputs == inputs.size());

    const auto outputs = model->getOutputs();
    const auto numOutputs = model->getNumOutputs();
    REQUIRE(numOutputs == 1);
    REQUIRE(numOutputs == outputs.size());

    /* input tensor metadata: NHWC 224x224 RGB image, quantized to UINT8 */
    auto input = model->getInput(0);
    REQUIRE(input->getName() == "image_tensor");
    REQUIRE(input->getDimensions() == std::vector<size_t> {1, 224, 224, 3});
    REQUIRE(input->getType() == edge::TensorType::UINT8);
    /* getInput(i) must hand back the same tensor object as getInputs()[i] */
    REQUIRE(input.get() == inputs[0].get());

    auto inputData = input->getTensorAs<uint8_t>();
    REQUIRE(inputData.size() == input->getSize());

    /* out-of-range index must yield a null tensor, not throw */
    auto badInput = model->getInput(1);
    REQUIRE(badInput == nullptr);

    /* output tensor metadata: 1000-class logits, quantized to UINT8 */
    auto output = model->getOutput(0);
    REQUIRE(output->getName() == "_668");
    REQUIRE(output->getDimensions() == std::vector<size_t> {1, 1000});
    REQUIRE(output->getType() == edge::TensorType::UINT8);
    REQUIRE(output.get() == outputs[0].get());

    auto outputBuffer = output->getTensorAs<uint8_t>();
    REQUIRE(outputBuffer.size() == output->getSize());

    auto badOutput = model->getOutput(1);
    REQUIRE(badOutput == nullptr);

    const auto executionStatus = model->execute();
    REQUIRE(executionStatus == edge::STATUS::SUCCESS);

    /* verify output buffer is persistent across execution */
    const auto newOutputBuffer = model->getOutput(0)->getTensorAs<uint8_t>();
    REQUIRE(outputBuffer.data() == newOutputBuffer.data());
    REQUIRE(outputBuffer.size() == newOutputBuffer.size());
}

0 comments on commit d833113

Please sign in to comment.