summaryrefslogtreecommitdiff
path: root/nn
diff options
context:
space:
mode:
authorSlava Shklyaev <slavash@google.com>2019-04-01 14:04:32 +0000
committerAndroid (Google) Code Review <android-gerrit@google.com>2019-04-01 14:04:32 +0000
commit8e0cb533bacca9f173529e2720c8b9eff8e658c7 (patch)
tree6cbcbdf23c3c49dc69ba1660b5ddc7ce7540d49c /nn
parent8c976d1961a9654d1f95a8d67ee53ee8ce4df0c5 (diff)
parentd828d15d5c07e3939b2f419466e1648a24af9709 (diff)
downloadml-8e0cb533bacca9f173529e2720c8b9eff8e658c7.tar.gz
Merge changes I1cd258e3,Ibe0fc535
* changes: Document NNAPI extensions Add a test Fibonacci extension
Diffstat (limited to 'nn')
-rw-r--r--nn/extensions/README.md160
-rw-r--r--nn/extensions/test_vendor/fibonacci/Android.bp6
-rw-r--r--nn/extensions/test_vendor/fibonacci/FibonacciExtension.h73
-rw-r--r--nn/runtime/Manager.h11
-rw-r--r--nn/runtime/test/Android.bp3
-rw-r--r--nn/runtime/test/TestExtensions.cpp7
-rw-r--r--nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp208
-rw-r--r--nn/runtime/test/fibonacci_extension/FibonacciDriver.h59
-rw-r--r--nn/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp338
9 files changed, 861 insertions, 4 deletions
diff --git a/nn/extensions/README.md b/nn/extensions/README.md
new file mode 100644
index 000000000..c7174431f
--- /dev/null
+++ b/nn/extensions/README.md
@@ -0,0 +1,160 @@
+# NNAPI vendor extensions
+
+In Android Q, Neural Networks API introduced vendor extensions -- a better,
+more structured alternative to the OEM operation and data types. Extensions
+allow vendors to provide custom hardware-accelerated operations via NNAPI.
+
+TODO(pszczepaniak): Which apps can use vendor extensions?
+
+This document explains how to create and use extensions.
+
+## Extension definition
+
+The vendor is expected to create and maintain a header file with the
+extension specification. A complete sample extension definition is provided in
+`test_vendor/fibonacci/FibonacciExtension.h`.
+
+Each extension must have a unique name that starts with the reverse domain name
+of the vendor:
+```c
+const char* const MY_EXTENSION_NAME = "com.example.my_extension";
+```
+
+This name acts as a namespace for operation and operand types.
+The driver uses this name to distinguish between extensions.
+
+Operations and data types are declared in a way similar to
+`../runtime/include/NeuralNetworks.h`:
+```c
+enum {
+ /**
+ * A custom scalar type.
+ */
+ MY_SCALAR = 0,
+
+ /**
+ * A custom tensor type.
+ *
+ * Attached to this tensor is {@link MyTensorParams}.
+ */
+ MY_TENSOR = 1,
+};
+
+enum {
+ /**
+ * Computes my function.
+ *
+ * Inputs:
+ * * 0: A scalar of {@link MY_SCALAR}.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link MY_TENSOR}.
+ */
+ MY_FUNCTION = 0,
+};
+```
+
+Extensions may also declare custom structures to accompany extension operands:
+```c
+/**
+ * Quantization parameters for {@link MY_TENSOR}.
+ */
+typedef struct MyTensorParams {
+ double scale;
+ int64_t zeroPoint;
+} MyTensorParams;
+```
+
+## Using extensions in NNAPI clients
+
+Runtime extension support is provided by
+`../runtime/include/NeuralNetworksExtensions.h` (C API) and
+`../runtime/include/NeuralNetworksWrapperExtensions.h` (C++ API).
+This section provides an overview of the former.
+
+Use `ANeuralNetworksDevice_getExtensionSupport` to check if a device supports
+an extension:
+```c
+bool isExtensionSupported;
+CHECK_EQ(ANeuralNetworksDevice_getExtensionSupport(device, MY_EXTENSION_NAME,
+ &isExtensionSupported),
+ ANEURALNETWORKS_NO_ERROR);
+if (isExtensionSupported) {
+ // The device supports the extension.
+ ...
+}
+```
+
+To build a model with an extension operand, use
+`ANeuralNetworksModel_getExtensionOperandType` to obtain the operand type.
+Then call `ANeuralNetworksModel_addOperand` as usual:
+```c
+int32_t type;
+CHECK_EQ(ANeuralNetworksModel_getExtensionOperandType(model, MY_EXTENSION_NAME, MY_TENSOR, &type),
+ ANEURALNETWORKS_NO_ERROR);
+ANeuralNetworksOperandType operandType{
+ .type = type,
+ .dimensionCount = dimensionCount,
+ .dimensions = dimensions,
+};
+CHECK_EQ(ANeuralNetworksModel_addOperand(model, &operandType), ANEURALNETWORKS_NO_ERROR);
+```
+
+Optionally, use `ANeuralNetworksModel_setOperandExtensionData` to
+associate additional data with an extension operand.
+```c
+MyTensorParams params{
+ .scale = 0.5,
+ .zeroPoint = 128,
+};
+CHECK_EQ(ANeuralNetworksModel_setOperandExtensionData(model, operandIndex, &params, sizeof(params)),
+ ANEURALNETWORKS_NO_ERROR);
+```
+
+To build a model with an extension operation, use
+`ANeuralNetworksModel_getExtensionOperationType` to obtain the operation type.
+Then call `ANeuralNetworksModel_addOperation` as usual:
+```c
+ANeuralNetworksOperationType type;
+CHECK_EQ(ANeuralNetworksModel_getExtensionOperationType(model, MY_EXTENSION_NAME, MY_FUNCTION,
+ &type),
+ ANEURALNETWORKS_NO_ERROR);
+CHECK_EQ(ANeuralNetworksModel_addOperation(model, type, inputCount, inputs, outputCount, outputs),
+ ANEURALNETWORKS_NO_ERROR);
+```
+
+## Adding extension support to an NNAPI driver
+
+The driver reports supported extensions via the
+`IDevice::getSupportedExtensions()` method.
+For each supported extension, the returned list must contain an entry
+describing it:
+```c++
+Extension {
+ .name = MY_EXTENSION_NAME,
+ .operandTypes = {
+ {
+ .type = MY_SCALAR,
+ .isTensor = false,
+ .byteSize = 8,
+ },
+ {
+ .type = MY_TENSOR,
+ .isTensor = true,
+ .byteSize = 8,
+ },
+ },
+}
+```
+
+When handling operation and operand types, the driver must check the
+`Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX` high bits of the type.
+These bits constitute the extension _prefix_. A zero prefix means no extension,
+whereas a non-zero prefix maps uniquely within a model to an extension name via
+`model.extensionNameToPrefix`.
+The low `Model::ExtensionTypeEncoding::LOW_BITS_TYPE` bits of the type
+correspond to the type within the extension.
+
+The driver must validate operands and operations.
+
+Extension operands may have associated data in `operand.extraParams.extension`.
diff --git a/nn/extensions/test_vendor/fibonacci/Android.bp b/nn/extensions/test_vendor/fibonacci/Android.bp
new file mode 100644
index 000000000..3a4269b5f
--- /dev/null
+++ b/nn/extensions/test_vendor/fibonacci/Android.bp
@@ -0,0 +1,6 @@
+cc_library_headers {
+ name: "neuralnetworks_test_vendor_fibonacci_extension",
+ host_supported: false,
+ vendor_available: true,
+ export_include_dirs: ["."],
+}
diff --git a/nn/extensions/test_vendor/fibonacci/FibonacciExtension.h b/nn/extensions/test_vendor/fibonacci/FibonacciExtension.h
new file mode 100644
index 000000000..98da899bb
--- /dev/null
+++ b/nn/extensions/test_vendor/fibonacci/FibonacciExtension.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_EXTENSIONS_TEST_VENDOR_FIBONACCI_FIBONACCI_EXTENSION_H
+#define ANDROID_FRAMEWORKS_ML_NN_EXTENSIONS_TEST_VENDOR_FIBONACCI_FIBONACCI_EXTENSION_H
+
+/**
+ * A sample extension definition.
+ */
+
+const char TEST_VENDOR_FIBONACCI_EXTENSION_NAME[] = "com.example.fibonacci";
+
+/**
+ * Quantization parameters for {@link TEST_VENDOR_TENSOR_QUANT64_ASYMM}.
+ */
+typedef struct TestVendorQuant64AsymmParams {
+ double scale;
+ int64_t zeroPoint;
+} TestVendorQuant64AsymmParams;
+
+enum {
+ /**
+ * A signed 64-bit integer scalar value.
+ */
+ TEST_VENDOR_INT64 = 0,
+
+ /**
+ * A tensor of 64-bit unsigned integers that represent real numbers.
+ *
+ * Attached to this tensor is {@link TestVendorQuant64AsymmParams} that is
+ * used to convert the 64-bit integer to the real value and vice versa.
+ *
+ * The formula is:
+ * real_value = (integer_value - zeroPoint) * scale.
+ */
+ TEST_VENDOR_TENSOR_QUANT64_ASYMM = 1,
+};
+
+enum {
+ /**
+ * Computes the Fibonacci sequence up to n.
+ *
+ * Supported input types:
+ * - {@link TEST_VENDOR_INT64}
+ * - {@link ANEURALNETWORKS_TENSOR_FLOAT32} (must contain exactly 1 element)
+ *
+ * Supported output types:
+ * - {@link TEST_VENDOR_TENSOR_QUANT64_ASYMM}
+ * - {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A scalar n.
+ *
+ * Outputs:
+ * * 0: A 1-D tensor of size n.
+ */
+ TEST_VENDOR_FIBONACCI = 0,
+};
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_EXTENSIONS_TEST_VENDOR_FIBONACCI_FIBONACCI_EXTENSION_H
diff --git a/nn/runtime/Manager.h b/nn/runtime/Manager.h
index adfc907e3..acff8a1a8 100644
--- a/nn/runtime/Manager.h
+++ b/nn/runtime/Manager.h
@@ -106,23 +106,28 @@ class DeviceManager {
// Returns the singleton Cpu device.
static std::shared_ptr<Device> getCpuDevice();
- // These functions are solely intended for use by unit tests of
- // the introspection and control API.
- //
+ // The functions below are solely intended for use by unit tests.
+
// Register a test device.
void forTest_registerDevice(const char* name, const sp<V1_0::IDevice>& device) {
registerDevice(name, device);
}
+
// Re-initialize the list of available devices.
void forTest_reInitializeDeviceList() {
mDevices.clear();
mDevicesCpuOnly.clear();
findAvailableDevices();
}
+
// Make a test device
static std::shared_ptr<Device> forTest_makeDriverDevice(const std::string& name,
const sp<V1_0::IDevice>& device);
+ bool forTest_isCpuDevice(const ANeuralNetworksDevice* device) const {
+ return reinterpret_cast<const Device*>(device) == getCpuDevice().get();
+ }
+
private:
// Builds the list of available drivers and queries their capabilities.
DeviceManager();
diff --git a/nn/runtime/test/Android.bp b/nn/runtime/test/Android.bp
index f5cb60d8b..4d9aaaccd 100644
--- a/nn/runtime/test/Android.bp
+++ b/nn/runtime/test/Android.bp
@@ -109,6 +109,8 @@ cc_defaults {
"TestPartitioningRandom.cpp",
"TestIntrospectionControl.cpp",
"TestExtensions.cpp",
+ "fibonacci_extension/FibonacciExtensionTest.cpp",
+ "fibonacci_extension/FibonacciDriver.cpp",
],
static_libs: [
"libgmock",
@@ -121,6 +123,7 @@ cc_defaults {
],
header_libs: [
"libneuralnetworks_private_headers",
+ "neuralnetworks_test_vendor_fibonacci_extension",
],
test_suites: [
"general-tests",
diff --git a/nn/runtime/test/TestExtensions.cpp b/nn/runtime/test/TestExtensions.cpp
index bd182311e..10b305e2c 100644
--- a/nn/runtime/test/TestExtensions.cpp
+++ b/nn/runtime/test/TestExtensions.cpp
@@ -69,11 +69,16 @@ class ExtensionsTest : public ::testing::Test {
}
DeviceManager::get()->forTest_registerDevice(kTestDriverName, new TestDriver());
+ // Discover extensions provided by registered devices.
+ TypeManager::get()->forTest_reset();
mDevice = getDeviceByName(kTestDriverName);
ASSERT_NE(mDevice, nullptr);
}
- virtual void TearDown() { DeviceManager::get()->forTest_reInitializeDeviceList(); }
+ virtual void TearDown() {
+ DeviceManager::get()->forTest_reInitializeDeviceList();
+ TypeManager::get()->forTest_reset();
+ }
ANeuralNetworksDevice* getDeviceByName(const std::string& name) {
ANeuralNetworksDevice* result = nullptr;
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
new file mode 100644
index 000000000..7c1b2af87
--- /dev/null
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FibonacciDriver"
+
+#include "FibonacciDriver.h"
+
+#include "HalInterfaces.h"
+#include "NeuralNetworksExtensions.h"
+#include "OperationResolver.h"
+#include "OperationsUtils.h"
+#include "Utils.h"
+#include "ValidateHal.h"
+
+#include "FibonacciExtension.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+namespace {
+
+const uint8_t kLowBitsType = static_cast<uint8_t>(Model::ExtensionTypeEncoding::LOW_BITS_TYPE);
+const uint32_t kTypeWithinExtensionMask = (1 << kLowBitsType) - 1;
+
+namespace fibonacci_op {
+
+constexpr char kOperationName[] = "TEST_VENDOR_FIBONACCI";
+
+constexpr uint32_t kNumInputs = 1;
+constexpr uint32_t kInputN = 0;
+
+constexpr uint32_t kNumOutputs = 1;
+constexpr uint32_t kOutputTensor = 0;
+
+bool getFibonacciExtensionPrefix(const Model& model, uint16_t* prefix) {
+ NN_RET_CHECK_EQ(model.extensionNameToPrefix.size(), 1u); // Assumes no other extensions in use.
+ NN_RET_CHECK_EQ(model.extensionNameToPrefix[0].name, TEST_VENDOR_FIBONACCI_EXTENSION_NAME);
+ *prefix = model.extensionNameToPrefix[0].prefix;
+ return true;
+}
+
+bool isFibonacciOperation(const Operation& operation, const Model& model) {
+ int32_t operationType = static_cast<int32_t>(operation.type);
+ uint16_t prefix;
+ NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
+ NN_RET_CHECK_EQ(operationType, (prefix << kLowBitsType) | TEST_VENDOR_FIBONACCI);
+ return true;
+}
+
+bool validate(const Operation& operation, const Model& model) {
+ NN_RET_CHECK(isFibonacciOperation(operation, model));
+ NN_RET_CHECK_EQ(operation.inputs.size(), kNumInputs);
+ NN_RET_CHECK_EQ(operation.outputs.size(), kNumOutputs);
+ int32_t inputType = static_cast<int32_t>(model.operands[operation.inputs[0]].type);
+ int32_t outputType = static_cast<int32_t>(model.operands[operation.outputs[0]].type);
+ uint16_t prefix;
+ NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
+ NN_RET_CHECK(inputType == ((prefix << kLowBitsType) | TEST_VENDOR_INT64) ||
+ inputType == ANEURALNETWORKS_TENSOR_FLOAT32);
+ NN_RET_CHECK(outputType == ((prefix << kLowBitsType) | TEST_VENDOR_TENSOR_QUANT64_ASYMM) ||
+ outputType == ANEURALNETWORKS_TENSOR_FLOAT32);
+ return true;
+}
+
+bool prepare(IOperationExecutionContext* context) {
+ int64_t n;
+ if (context->getInputType(kInputN) == OperandType::TENSOR_FLOAT32) {
+ n = static_cast<int64_t>(context->getInputValue<float>(kInputN));
+ } else {
+ n = context->getInputValue<int64_t>(kInputN);
+ }
+ NN_RET_CHECK_GE(n, 1);
+ Shape output = context->getOutputShape(kOutputTensor);
+ output.dimensions = {static_cast<uint32_t>(n)};
+ return context->setOutputShape(kOutputTensor, output);
+}
+
+template <typename ScaleT, typename ZeroPointT, typename OutputT>
+bool compute(int32_t n, ScaleT outputScale, ZeroPointT outputZeroPoint, OutputT* output) {
+ // Compute the Fibonacci numbers.
+ if (n >= 1) {
+ output[0] = 1;
+ }
+ if (n >= 2) {
+ output[1] = 1;
+ }
+ if (n >= 3) {
+ for (int32_t i = 2; i < n; ++i) {
+ output[i] = output[i - 1] + output[i - 2];
+ }
+ }
+
+ // Quantize output.
+ for (int32_t i = 0; i < n; ++i) {
+ output[i] = output[i] / outputScale + outputZeroPoint;
+ }
+
+ return true;
+}
+
+bool execute(IOperationExecutionContext* context) {
+ int64_t n;
+ if (context->getInputType(kInputN) == OperandType::TENSOR_FLOAT32) {
+ n = static_cast<int64_t>(context->getInputValue<float>(kInputN));
+ } else {
+ n = context->getInputValue<int64_t>(kInputN);
+ }
+ if (context->getOutputType(kOutputTensor) == OperandType::TENSOR_FLOAT32) {
+ float* output = context->getOutputBuffer<float>(kOutputTensor);
+ return compute(n, /*scale=*/1.0, /*zeroPoint=*/0, output);
+ } else {
+ uint64_t* output = context->getOutputBuffer<uint64_t>(kOutputTensor);
+ Shape outputShape = context->getOutputShape(kOutputTensor);
+ auto outputQuant = reinterpret_cast<const TestVendorQuant64AsymmParams*>(
+ outputShape.extraParams.extension().data());
+ return compute(n, outputQuant->scale, outputQuant->zeroPoint, output);
+ }
+}
+
+} // namespace fibonacci_op
+} // namespace
+
+const OperationRegistration* FibonacciOperationResolver::findOperation(
+ OperationType operationType) const {
+ // .validate is omitted because it's not used by the extension driver.
+ static OperationRegistration operationRegistration(operationType, fibonacci_op::kOperationName,
+ nullptr, fibonacci_op::prepare,
+ fibonacci_op::execute, {});
+ uint16_t prefix = static_cast<int32_t>(operationType) >> kLowBitsType;
+ uint16_t typeWithinExtension = static_cast<int32_t>(operationType) & kTypeWithinExtensionMask;
+ // Assumes no other extensions in use.
+ return prefix != 0 && typeWithinExtension == TEST_VENDOR_FIBONACCI ? &operationRegistration
+ : nullptr;
+}
+
+Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
+ cb(ErrorStatus::NONE,
+ {
+ {
+ .name = TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ .operandTypes =
+ {
+ {
+ .type = TEST_VENDOR_INT64,
+ .isTensor = false,
+ .byteSize = 8,
+ },
+ {
+ .type = TEST_VENDOR_TENSOR_QUANT64_ASYMM,
+ .isTensor = true,
+ .byteSize = 8,
+ },
+ },
+ },
+ });
+ return Void();
+}
+
+Return<void> FibonacciDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+ android::nn::initVLogMask();
+ VLOG(DRIVER) << "getCapabilities()";
+ static const PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+ Capabilities capabilities = {.relaxedFloat32toFloat16PerformanceScalar = kPerf,
+ .relaxedFloat32toFloat16PerformanceTensor = kPerf,
+ .operandPerformance = nonExtensionOperandPerformance(kPerf)};
+ cb(ErrorStatus::NONE, capabilities);
+ return Void();
+}
+
+Return<void> FibonacciDriver::getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) {
+ VLOG(DRIVER) << "getSupportedOperations()";
+ if (!validateModel(model)) {
+ cb(ErrorStatus::INVALID_ARGUMENT, {});
+ return Void();
+ }
+ const size_t count = model.operations.size();
+ std::vector<bool> supported(count);
+ for (size_t i = 0; i < count; ++i) {
+ const Operation& operation = model.operations[i];
+ if (fibonacci_op::isFibonacciOperation(operation, model)) {
+ if (!fibonacci_op::validate(operation, model)) {
+ cb(ErrorStatus::INVALID_ARGUMENT, {});
+ return Void();
+ }
+ supported[i] = true;
+ }
+ }
+ cb(ErrorStatus::NONE, supported);
+ return Void();
+}
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.h b/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
new file mode 100644
index 000000000..c074c0d79
--- /dev/null
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FIBONACCI_EXTENSION_FIBONACCI_DRIVER_H
+#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FIBONACCI_EXTENSION_FIBONACCI_DRIVER_H
+
+#include "OperationResolver.h"
+#include "SampleDriver.h"
+
+namespace android {
+namespace nn {
+namespace sample_driver {
+
+class FibonacciOperationResolver : public IOperationResolver {
+ DISALLOW_COPY_AND_ASSIGN(FibonacciOperationResolver);
+
+ public:
+ static const FibonacciOperationResolver* get() {
+ static FibonacciOperationResolver instance;
+ return &instance;
+ }
+
+ const OperationRegistration* findOperation(OperationType operationType) const override;
+
+ private:
+ FibonacciOperationResolver() {}
+};
+
+// A driver that implements FibonacciExtension.
+// The only supported operation is TEST_VENDOR_FIBONACCI.
+class FibonacciDriver : public SampleDriver {
+ public:
+ FibonacciDriver() : SampleDriver(kDriverName, FibonacciOperationResolver::get()) {}
+ Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
+ Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
+ Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) override;
+
+ static constexpr char kDriverName[] = "sample-driver-fibonacci-extension";
+};
+
+} // namespace sample_driver
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FIBONACCI_EXTENSION_FIBONACCI_DRIVER_H
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp b/nn/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp
new file mode 100644
index 000000000..04ccfca07
--- /dev/null
+++ b/nn/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "HalInterfaces.h"
+#include "Manager.h"
+#include "NeuralNetworks.h"
+#include "NeuralNetworksExtensions.h"
+#include "NeuralNetworksWrapperExtensions.h"
+#include "TestNeuralNetworksWrapper.h"
+#include "TypeManager.h"
+#include "Utils.h"
+#include "ValidateHal.h"
+
+#include <gtest/gtest.h>
+
+#include "FibonacciDriver.h"
+#include "FibonacciExtension.h"
+
+namespace android {
+namespace nn {
+namespace {
+
+using ::android::nn::test_wrapper::ExtensionModel;
+using ::android::nn::test_wrapper::ExtensionOperandParams;
+using ::android::nn::test_wrapper::ExtensionOperandType;
+using ::android::nn::test_wrapper::Type;
+
+class FibonacciExtensionTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ if (DeviceManager::get()->getUseCpuOnly()) {
+ // This test requires the use of a custom driver.
+ GTEST_SKIP();
+ }
+
+ // Real world extension tests should run against actual hardware
+ // implementations, but there is no hardware supporting the test
+ // extension. Hence the sample software driver.
+ DeviceManager::get()->forTest_registerDevice(sample_driver::FibonacciDriver::kDriverName,
+ new sample_driver::FibonacciDriver());
+ // Discover extensions provided by registered devices.
+ TypeManager::get()->forTest_reset();
+
+ uint32_t numDevices = 0;
+ ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+ ANeuralNetworksDevice* fibonacciDevice = nullptr;
+ ANeuralNetworksDevice* cpuDevice = nullptr;
+ for (uint32_t i = 0; i < numDevices; i++) {
+ ANeuralNetworksDevice* device = nullptr;
+ EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+ bool supportsFibonacciExtension;
+ ASSERT_EQ(ANeuralNetworksDevice_getExtensionSupport(
+ device, TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ &supportsFibonacciExtension),
+ ANEURALNETWORKS_NO_ERROR);
+ if (supportsFibonacciExtension) {
+ ASSERT_EQ(fibonacciDevice, nullptr) << "Found multiple Fibonacci drivers";
+ fibonacciDevice = device;
+ } else if (DeviceManager::get()->forTest_isCpuDevice(device)) {
+ ASSERT_EQ(cpuDevice, nullptr) << "Found multiple CPU drivers";
+ cpuDevice = device;
+ }
+ }
+ ASSERT_NE(fibonacciDevice, nullptr) << "Expecting Fibonacci driver to be available";
+ ASSERT_NE(cpuDevice, nullptr) << "Expecting CPU driver to be available";
+ mDevices = {fibonacciDevice, cpuDevice};
+ }
+
+ virtual void TearDown() {
+ if (mExecution) {
+ ANeuralNetworksExecution_free(mExecution);
+ }
+ if (mCompilation) {
+ ANeuralNetworksCompilation_free(mCompilation);
+ }
+ DeviceManager::get()->forTest_reInitializeDeviceList();
+ TypeManager::get()->forTest_reset();
+ }
+
+ void checkSupportedOperations(const std::vector<bool>& expected) {
+ const uint32_t kMaxNumberOperations = 256;
+ EXPECT_LE(expected.size(), kMaxNumberOperations);
+ bool supported[kMaxNumberOperations] = {false};
+ EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(
+ mModel.getHandle(), mDevices.data(), mDevices.size(), supported),
+ ANEURALNETWORKS_NO_ERROR);
+ for (size_t i = 0; i < expected.size(); ++i) {
+ SCOPED_TRACE(::testing::Message() << "i = " << i);
+ EXPECT_EQ(supported[i], expected[i]);
+ }
+ }
+
+ void prepareForExecution() {
+ ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
+ mDevices.size(), &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
+ ANEURALNETWORKS_NO_ERROR);
+ }
+
+ std::vector<ANeuralNetworksDevice*> mDevices;
+ ANeuralNetworksExecution* mExecution = nullptr;
+ ANeuralNetworksCompilation* mCompilation = nullptr;
+ ExtensionModel mModel;
+};
+
+void addNopOperation(ExtensionModel* model, ExtensionOperandType inputType, uint32_t input,
+ uint32_t output) {
+ // Our NOP operation is ADD, which has no extension type support.
+ ASSERT_EQ(inputType.operandType.type, ANEURALNETWORKS_TENSOR_FLOAT32);
+ ASSERT_EQ(inputType.dimensions.size(), 1u);
+
+ uint32_t inputZeros = model->addOperand(&inputType);
+ uint32_t inputSize = inputType.dimensions[0];
+ uint32_t inputLength = sizeof(float) * inputSize;
+ const float kZeros[100] = {};
+ ASSERT_GE(sizeof(kZeros), inputLength);
+ model->setOperandValue(inputZeros, &kZeros, inputLength);
+
+ ExtensionOperandType scalarType(Type::INT32, {});
+ uint32_t activation = model->addOperand(&scalarType);
+ int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE;
+ model->setOperandValue(activation, &kNoActivation, sizeof(kNoActivation));
+
+ model->addOperation(ANEURALNETWORKS_ADD, {input, inputZeros, activation}, {output});
+}
+
+void createModel(ExtensionModel* model, ExtensionOperandType inputType,
+ ExtensionOperandType outputType, bool addNopOperations) {
+ uint32_t fibonacciInput = model->addOperand(&inputType);
+ uint32_t fibonacciOutput = model->addOperand(&outputType);
+
+ uint32_t modelInput = addNopOperations ? model->addOperand(&inputType) : fibonacciInput;
+ uint32_t modelOutput = addNopOperations ? model->addOperand(&outputType) : fibonacciOutput;
+
+ if (addNopOperations) {
+ addNopOperation(model, inputType, modelInput, fibonacciInput);
+ }
+ model->addOperation(model->getExtensionOperationType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ TEST_VENDOR_FIBONACCI),
+ {fibonacciInput}, {fibonacciOutput});
+ if (addNopOperations) {
+ addNopOperation(model, outputType, fibonacciOutput, modelOutput);
+ }
+
+ model->identifyInputsAndOutputs({modelInput}, {modelOutput});
+ model->finish();
+ ASSERT_TRUE(model->isValid());
+}
+
+TEST_F(FibonacciExtensionTest, ModelWithExtensionOperandTypes) {
+ constexpr uint32_t N = 10;
+ constexpr double scale = 0.5;
+ constexpr int64_t zeroPoint = 10;
+
+ ExtensionOperandType inputType(
+ static_cast<Type>(mModel.getExtensionOperandType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ TEST_VENDOR_INT64)),
+ {});
+ ExtensionOperandType outputType(
+ static_cast<Type>(mModel.getExtensionOperandType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ TEST_VENDOR_TENSOR_QUANT64_ASYMM)),
+ {N},
+ ExtensionOperandParams(TestVendorQuant64AsymmParams{
+ .scale = scale,
+ .zeroPoint = zeroPoint,
+ }));
+ createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
+ checkSupportedOperations({true});
+ prepareForExecution();
+
+ int64_t input = N;
+ EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
+ ANEURALNETWORKS_NO_ERROR);
+
+ int64_t output[N] = {};
+ EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
+ ANEURALNETWORKS_NO_ERROR);
+
+ ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);
+
+ EXPECT_EQ(output[0], 1 / scale + zeroPoint);
+ EXPECT_EQ(output[1], 1 / scale + zeroPoint);
+ EXPECT_EQ(output[2], 2 / scale + zeroPoint);
+ EXPECT_EQ(output[3], 3 / scale + zeroPoint);
+ EXPECT_EQ(output[4], 5 / scale + zeroPoint);
+ EXPECT_EQ(output[5], 8 / scale + zeroPoint);
+ EXPECT_EQ(output[6], 13 / scale + zeroPoint);
+ EXPECT_EQ(output[7], 21 / scale + zeroPoint);
+ EXPECT_EQ(output[8], 34 / scale + zeroPoint);
+ EXPECT_EQ(output[9], 55 / scale + zeroPoint);
+}
+
+TEST_F(FibonacciExtensionTest, ModelWithTemporaries) {
+ constexpr uint32_t N = 10;
+
+ ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
+ ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {N});
+ createModel(&mModel, inputType, outputType, /*addNopOperations=*/true);
+ checkSupportedOperations({true, true, true});
+ prepareForExecution();
+
+ float input[] = {N};
+ EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
+ ANEURALNETWORKS_NO_ERROR);
+
+ float output[N] = {};
+ EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
+ ANEURALNETWORKS_NO_ERROR);
+
+ ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);
+
+ EXPECT_EQ(output[0], 1);
+ EXPECT_EQ(output[1], 1);
+ EXPECT_EQ(output[2], 2);
+ EXPECT_EQ(output[3], 3);
+ EXPECT_EQ(output[4], 5);
+ EXPECT_EQ(output[5], 8);
+ EXPECT_EQ(output[6], 13);
+ EXPECT_EQ(output[7], 21);
+ EXPECT_EQ(output[8], 34);
+ EXPECT_EQ(output[9], 55);
+}
+
+TEST_F(FibonacciExtensionTest, InvalidInputType) {
+ ExtensionOperandType inputType(Type::TENSOR_INT32, {1}); // Unsupported type.
+ ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
+ createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
+ checkSupportedOperations({false}); // The driver reports that it doesn't support the operation.
+ ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
+ mDevices.size(), &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(FibonacciExtensionTest, InvalidOutputType) {
+ ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
+ ExtensionOperandType outputType(Type::TENSOR_INT32, {1}); // Unsupported type.
+ createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
+ checkSupportedOperations({false}); // The driver reports that it doesn't support the operation.
+ ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
+ mDevices.size(), &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(FibonacciExtensionTest, InvalidInputValue) {
+ ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
+ ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
+ createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
+ checkSupportedOperations({true});
+ prepareForExecution();
+
+ float input[] = {-1}; // Invalid input value.
+ EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
+ ANEURALNETWORKS_NO_ERROR);
+
+ float output[1] = {};
+ EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
+ ANEURALNETWORKS_NO_ERROR);
+
+ ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_OP_FAILED);
+}
+
+TEST_F(FibonacciExtensionTest, InvalidNumInputs) {
+ ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
+ ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
+ uint32_t input1 = mModel.addOperand(&inputType);
+ uint32_t input2 = mModel.addOperand(&inputType); // Extra input.
+ uint32_t output = mModel.addOperand(&outputType);
+ mModel.addOperation(mModel.getExtensionOperationType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ TEST_VENDOR_FIBONACCI),
+ {input1, input2}, {output});
+ mModel.identifyInputsAndOutputs({input1, input2}, {output});
+ mModel.finish();
+ ASSERT_TRUE(mModel.isValid());
+ checkSupportedOperations({false});
+ ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
+ mDevices.size(), &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(FibonacciExtensionTest, InvalidNumOutputs) {
+ ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
+ ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
+ uint32_t input = mModel.addOperand(&inputType);
+ uint32_t output1 = mModel.addOperand(&outputType);
+ uint32_t output2 = mModel.addOperand(&outputType); // Extra output.
+ mModel.addOperation(mModel.getExtensionOperationType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ TEST_VENDOR_FIBONACCI),
+ {input}, {output1, output2});
+ mModel.identifyInputsAndOutputs({input}, {output1, output2});
+ mModel.finish();
+ ASSERT_TRUE(mModel.isValid());
+ checkSupportedOperations({false});
+ ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
+ mDevices.size(), &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
+}
+
+TEST_F(FibonacciExtensionTest, InvalidOperation) {
+ ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
+ ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
+ uint32_t input = mModel.addOperand(&inputType);
+ uint32_t output = mModel.addOperand(&outputType);
+ mModel.addOperation(mModel.getExtensionOperationType(
+ TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
+ TEST_VENDOR_FIBONACCI + 1), // This operation should not exist.
+ {input}, {output});
+ mModel.identifyInputsAndOutputs({input}, {output});
+ mModel.finish();
+ ASSERT_TRUE(mModel.isValid());
+ checkSupportedOperations({false});
+ ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
+ mDevices.size(), &mCompilation),
+ ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
+}
+
+} // namespace
+} // namespace nn
+} // namespace android