aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2023-07-07 01:09:31 +0000
committerAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2023-07-07 01:09:31 +0000
commit714a65a41d09d10286a82b70c6ed631e62240067 (patch)
tree8342e551a24c17f9026d737cc634678115bba1f8
parent05d4746a2db7f279b06df6b7a4aa6c77c80944a2 (diff)
parent7aac29a8f2ea00dea08c556da57448a47d77a783 (diff)
downloadNeuralNetworks-714a65a41d09d10286a82b70c6ed631e62240067.tar.gz
Snap for 10447354 from 7aac29a8f2ea00dea08c556da57448a47d77a783 to mainline-resolv-releaseaml_res_340912000
Change-Id: I3a4626e982ab72a61a9f5e0ff93cb306212d7c56
-rw-r--r--Android.bp31
-rw-r--r--NNAPI_OWNERS2
-rw-r--r--TEST_MAPPING17
-rw-r--r--common/Android.bp5
-rw-r--r--common/cpu_operations/LSTM.cpp5
-rw-r--r--common/random/Android.bp2
-rw-r--r--common/random/random.cc2
-rw-r--r--common/types/include/nnapi/Result.h4
-rw-r--r--common/types/include/nnapi/TypeUtils.h8
-rw-r--r--driver/sample/Android.bp36
-rw-r--r--driver/sample/Fuzzer.cpp57
-rw-r--r--driver/sample_aidl/SampleDriverAidlLimited.cpp4
-rw-r--r--driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-limited.xml3
-rw-r--r--driver/sample_shim/Android.bp3
-rwxr-xr-xdriver/sample_shim/android_riscv64/neuralnetworks_sample_sl_driver_prebuilt.sobin0 -> 3625072 bytes
-rwxr-xr-xdriver/sample_shim/generate_prebuilts.sh2
-rw-r--r--runtime/Android.bp54
-rw-r--r--runtime/FlatbufferModelBuilder.cpp124
-rw-r--r--runtime/FlatbufferModelBuilder.h65
-rw-r--r--runtime/FlatbufferModelBuilderUtils.h171
-rw-r--r--runtime/NeuralNetworks.cpp39
-rw-r--r--runtime/NeuralNetworksV2.cpp1674
-rw-r--r--runtime/operation_converters/AddOperationConverter.cpp62
-rw-r--r--runtime/operation_converters/AddOperationConverter.h35
-rw-r--r--runtime/operation_converters/ArithmeticOperationConverter.cpp46
-rw-r--r--runtime/operation_converters/ArithmeticOperationConverter.h49
-rw-r--r--runtime/operation_converters/Conv2DOperationConverter.cpp244
-rw-r--r--runtime/operation_converters/Conv2DOperationConverter.h62
-rw-r--r--runtime/operation_converters/DepthwiseConv2DOperationConverter.cpp132
-rw-r--r--runtime/operation_converters/DepthwiseConv2DOperationConverter.h45
-rw-r--r--runtime/operation_converters/LogisticOperationConverter.cpp65
-rw-r--r--runtime/operation_converters/LogisticOperationConverter.h47
-rw-r--r--runtime/operation_converters/OperationConverter.h35
-rw-r--r--runtime/operation_converters/OperationConverterResolver.cpp166
-rw-r--r--runtime/operation_converters/OperationConverterResolver.h61
-rw-r--r--runtime/operation_converters/SubGraphContext.cpp214
-rw-r--r--runtime/operation_converters/SubGraphContext.h110
-rw-r--r--runtime/packageinfo/libneuralnetworks_packageinfo.map.txt4
-rw-r--r--runtime/test/Android.bp91
-rw-r--r--runtime/test/AndroidTest_NeuralNetworksTest_v2_static.xml34
-rw-r--r--runtime/test/SupportLibraryTestGenerated.cpp54
-rw-r--r--runtime/test/TestCompatibilityLayer.cpp251
-rw-r--r--runtime/test/android_fuzzing/Android.bp2
-rw-r--r--runtime/test/fuzzing/RandomGraphGeneratorUtils.h27
-rw-r--r--shim_and_sl/ShimConverter.cpp10
-rw-r--r--shim_and_sl/ShimPreparedModel.cpp8
-rw-r--r--tools/nnapi_info/Android.bp37
-rw-r--r--tools/nnapi_info/nnapi_info.cpp106
-rw-r--r--tools/test_generator/test_harness/include/TestHarness.h2
49 files changed, 4214 insertions, 93 deletions
diff --git a/Android.bp b/Android.bp
index 96188d64a..072045506 100644
--- a/Android.bp
+++ b/Android.bp
@@ -36,38 +36,7 @@ license {
}
cc_defaults {
- name: "neuralnetworks_float16",
- // Note: the newlines in the "cflags" sections are intentional to ensure
- // bpfmt -w -s does not change the order of the compiler flags.
- arch: {
- x86: {
- cflags: [
- "-D_Float16=__fp16",
-
- "-Xclang",
- "-fnative-half-type",
-
- "-Xclang",
- "-fallow-half-arguments-and-returns",
- ],
- },
- x86_64: {
- cflags: [
- "-D_Float16=__fp16",
-
- "-Xclang",
- "-fnative-half-type",
-
- "-Xclang",
- "-fallow-half-arguments-and-returns",
- ],
- },
- },
-}
-
-cc_defaults {
name: "neuralnetworks_defaults",
- defaults: ["neuralnetworks_float16"],
cflags: [
"-O3",
"-Wall",
diff --git a/NNAPI_OWNERS b/NNAPI_OWNERS
index 7e6ea31af..465bb0876 100644
--- a/NNAPI_OWNERS
+++ b/NNAPI_OWNERS
@@ -1,8 +1,6 @@
butlermichael@google.com
dgross@google.com
-galarragas@google.com
ianhua@google.com
-jeanluc@google.com
mattalexander@google.com
miaowang@google.com
pszczepaniak@google.com
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 63d3aed9d..14b9083ed 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -29,6 +29,23 @@
}
]
},
+ // TODO(b/244359503): Re-enable once the conversion layer is fixed.
+ // {
+ // "name": "NeuralNetworksTest_v2_static",
+ // "options": [
+ // {
+ // // Restrict NeuralNetworksTest_v2_static to run only a single
+ // // pass consisting of:
+ // // * useCpuOnly = 0
+ // // * computeMode = ComputeMode::ASYNC
+ // //
+ // // The value here is a bitmask indicating only "pass 2"
+ // // should be run (4 = 2^2). The bit conversions can be
+ // // found in packages/modules/NeuralNetworks/runtime/test/TestMain.cpp.
+ // "native-test-flag": "4"
+ // }
+ // ]
+ // },
{
"name": "CtsNNAPITestCases"
}
diff --git a/common/Android.bp b/common/Android.bp
index 159ceee96..108cbe102 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -228,6 +228,7 @@ cc_library_static {
defaults: [
"libneuralnetworks_common_defaults",
],
+ min_sdk_version: "30",
}
cc_library_static {
@@ -362,7 +363,6 @@ cc_library_static {
cc_defaults {
name: "NeuralNetworksTest_common",
defaults: [
- "neuralnetworks_float16",
"neuralnetworks_use_latest_utils_hal_aidl",
],
host_supported: true,
@@ -416,6 +416,9 @@ cc_test {
"philox_random_headers",
"tensorflow_headers",
],
+ test_suites: [
+ "general-tests",
+ ],
}
cc_test {
diff --git a/common/cpu_operations/LSTM.cpp b/common/cpu_operations/LSTM.cpp
index 8c1a4c254..66b6bee41 100644
--- a/common/cpu_operations/LSTM.cpp
+++ b/common/cpu_operations/LSTM.cpp
@@ -18,11 +18,6 @@
#include "LSTM.h"
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunused-parameter"
-#include <tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h>
-#pragma clang diagnostic pop
-
#include <tensorflow/lite/kernels/internal/tensor_utils.h>
#include <vector>
diff --git a/common/random/Android.bp b/common/random/Android.bp
index df2b472e9..a05b8336c 100644
--- a/common/random/Android.bp
+++ b/common/random/Android.bp
@@ -31,6 +31,7 @@ cc_library_headers {
"com.android.neuralnetworks",
"test_com.android.neuralnetworks",
],
+ min_sdk_version: "30",
sdk_version: "current",
}
@@ -38,6 +39,7 @@ cc_library_static {
name: "philox_random",
host_supported: true,
vendor_available: true,
+ min_sdk_version: "30",
apex_available: [
"//apex_available:platform",
"com.android.neuralnetworks",
diff --git a/common/random/random.cc b/common/random/random.cc
index 9d4b33f57..168396ecf 100644
--- a/common/random/random.cc
+++ b/common/random/random.cc
@@ -15,8 +15,8 @@ limitations under the License.
#include "random.h"
-#include <tensorflow/core/platform/mutex.h>
#include <tensorflow/core/platform/types.h>
+
#include <random>
namespace tensorflow {
diff --git a/common/types/include/nnapi/Result.h b/common/types/include/nnapi/Result.h
index 698ab70fb..2d2710678 100644
--- a/common/types/include/nnapi/Result.h
+++ b/common/types/include/nnapi/Result.h
@@ -135,8 +135,8 @@ std::nullopt_t nnTryGetError(std::optional<T> /*o*/) {
* following functions for the type:
* * `::android::nn::nnTryHasValue` returns `true` if the `expr` holds a successful value, false if
* the `expr` value holds an error
- * * `::android::nn::nnTryGetError` returns the successful value of `expr` or crashes
- * * `::android::nn::nnTryGetValue` returns the error value of `expr` or crashes
+ * * `::android::nn::nnTryGetError` returns the error value of `expr` or crashes
+ * * `::android::nn::nnTryGetValue` returns the successful value of `expr` or crashes
*
* Usage at call site:
* const auto [a, b, c] = NN_TRY(failableFunction(args));
diff --git a/common/types/include/nnapi/TypeUtils.h b/common/types/include/nnapi/TypeUtils.h
index 87e7c3f3b..b6964fc9e 100644
--- a/common/types/include/nnapi/TypeUtils.h
+++ b/common/types/include/nnapi/TypeUtils.h
@@ -289,6 +289,14 @@ class NnRetCheckErrorStream {
return result;
}
+ // This is needed because conversion to Result<int> is ambiguous
+ // due to the above bool() operator overload
+ operator Result<int>() { // NOLINT(google-explicit-constructor)
+ auto result = base::unexpected(std::move(mBuffer)->str());
+ mBuffer.reset();
+ return result;
+ }
+
private:
std::optional<std::ostringstream> mBuffer = std::ostringstream{};
};
diff --git a/driver/sample/Android.bp b/driver/sample/Android.bp
index 286cdcd04..85a29c37d 100644
--- a/driver/sample/Android.bp
+++ b/driver/sample/Android.bp
@@ -112,3 +112,39 @@ cc_library_shared {
"libneuralnetworks_cl",
],
}
+
+cc_fuzz {
+ name: "android.hardware.neuralnetworks-service.example_fuzzer",
+ host_supported: true,
+ defaults: [
+ "neuralnetworks_defaults",
+ "neuralnetworks_use_latest_utils_hal_aidl",
+ "service_fuzzer_defaults",
+ ],
+ header_libs: [
+ "libneuralnetworks_headers",
+ ],
+ shared_libs: [
+ "liblog",
+ "libtextclassifier_hash",
+ ],
+ static_libs: [
+ "libaidlcommonsupport",
+ "libneuralnetworks_common",
+ "neuralnetworks_canonical_sample_driver",
+ "neuralnetworks_utils_hal_adapter_aidl",
+ ],
+ target: {
+ android: {
+ shared_libs: [
+ "libnativewindow",
+ ],
+ },
+ },
+ srcs: ["Fuzzer.cpp"],
+ fuzz_config: {
+ cc: [
+ "butlermichael@google.com",
+ ],
+ },
+}
diff --git a/driver/sample/Fuzzer.cpp b/driver/sample/Fuzzer.cpp
new file mode 100644
index 000000000..19794103d
--- /dev/null
+++ b/driver/sample/Fuzzer.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Fuzzer"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <fuzzbinder/libbinder_ndk_driver.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <nnapi/hal/aidl/Adapter.h>
+
+#include <memory>
+#include <string>
+
+#include "CanonicalDevice.h"
+
+namespace aidl::android::hardware::neuralnetworks::fuzzer {
+namespace {
+
+std::shared_ptr<BnDevice> makeDevice() {
+ const std::string name = "nnapi-sample";
+ auto device = std::make_shared<::android::nn::sample::Device>(name);
+ return adapter::adapt(std::move(device));
+}
+
+void limitLoggingToCrashes() {
+ [[maybe_unused]] static const auto oldSeverity = ::android::base::SetMinimumLogSeverity(
+ ::android::base::LogSeverity::FATAL_WITHOUT_ABORT);
+}
+
+} // namespace
+} // namespace aidl::android::hardware::neuralnetworks::fuzzer
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ // Limit NNAPI fuzz test logging to crashes (which is what the test cares about) to reduce the
+ // noise and potentially speed up testing.
+ aidl::android::hardware::neuralnetworks::fuzzer::limitLoggingToCrashes();
+
+ // Initialize the Device under test when LLVMFuzzerTestOneInput is first called, and reuse it in
+ // later calls.
+ static const auto device = aidl::android::hardware::neuralnetworks::fuzzer::makeDevice();
+
+ android::fuzzService(device->asBinder().get(), FuzzedDataProvider(data, size));
+ return 0;
+}
diff --git a/driver/sample_aidl/SampleDriverAidlLimited.cpp b/driver/sample_aidl/SampleDriverAidlLimited.cpp
index 08264d879..4b92f7701 100644
--- a/driver/sample_aidl/SampleDriverAidlLimited.cpp
+++ b/driver/sample_aidl/SampleDriverAidlLimited.cpp
@@ -59,6 +59,10 @@ int main() {
CHECK_EQ(devices.size(), aidlDevices.size());
for (size_t i = 0; i < aidlDevices.size(); ++i) {
const std::string name = devices[i]->getName();
+ if (name != "nnapi-sample_quant") {
+ continue;
+ }
+
const std::string fqName = std::string(AidlIDevice::descriptor) + "/" + name;
const binder_status_t status =
AServiceManager_addService(aidlDevices[i]->asBinder().get(), fqName.c_str());
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-limited.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-limited.xml
index 2f74f2dca..f57117ebb 100644
--- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-limited.xml
+++ b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-limited.xml
@@ -2,9 +2,6 @@
<hal format="aidl">
<name>android.hardware.neuralnetworks</name>
<version>4</version>
- <fqname>IDevice/nnapi-sample_float_fast</fqname>
- <fqname>IDevice/nnapi-sample_float_slow</fqname>
- <fqname>IDevice/nnapi-sample_minimal</fqname>
<fqname>IDevice/nnapi-sample_quant</fqname>
</hal>
</manifest>
diff --git a/driver/sample_shim/Android.bp b/driver/sample_shim/Android.bp
index 249ead9e4..b0659ece0 100644
--- a/driver/sample_shim/Android.bp
+++ b/driver/sample_shim/Android.bp
@@ -49,6 +49,9 @@ cc_prebuilt_library_shared {
android_arm: {
srcs: ["android_arm/neuralnetworks_sample_sl_driver_prebuilt.so"],
},
+ android_riscv64: {
+ srcs: ["android_riscv64/neuralnetworks_sample_sl_driver_prebuilt.so"],
+ },
},
apex_available: ["//apex_available:platform"],
}
diff --git a/driver/sample_shim/android_riscv64/neuralnetworks_sample_sl_driver_prebuilt.so b/driver/sample_shim/android_riscv64/neuralnetworks_sample_sl_driver_prebuilt.so
new file mode 100755
index 000000000..63e5a11c2
--- /dev/null
+++ b/driver/sample_shim/android_riscv64/neuralnetworks_sample_sl_driver_prebuilt.so
Binary files differ
diff --git a/driver/sample_shim/generate_prebuilts.sh b/driver/sample_shim/generate_prebuilts.sh
index 812a5a9cc..5b4b89f1b 100755
--- a/driver/sample_shim/generate_prebuilts.sh
+++ b/driver/sample_shim/generate_prebuilts.sh
@@ -13,7 +13,7 @@ set -e
cd $ANDROID_BUILD_TOP
source build/envsetup.sh
-ARCHS="x86,arm,arm64,x86_64"
+ARCHS="x86,arm,arm64,x86_64,riscv64"
SAMPLE_SL_DRIVER="neuralnetworks_sample_sl_driver"
for arch in ${ARCHS//,/ }
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 7bc3b1b9d..21ec0b9af 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -54,6 +54,7 @@ cc_library_headers {
host_supported: true,
vendor_available: true,
export_include_dirs: ["include"],
+ min_sdk_version: "30",
apex_available: [
"com.android.neuralnetworks",
"test_com.android.neuralnetworks", // Due to the dependency from libneuralnetworks_common
@@ -105,7 +106,6 @@ cc_defaults {
version_script: "libneuralnetworks.map.txt",
generated_sources: ["statslog_neuralnetworks.cpp"],
generated_headers: ["statslog_neuralnetworks.h"],
- export_generated_headers: ["statslog_neuralnetworks.h"],
srcs: [
"TelemetryStatsd.cpp",
],
@@ -170,6 +170,35 @@ cc_defaults {
],
}
+cc_defaults {
+ name: "libneuralnetworks_v2_defaults",
+ defaults: ["libneuralnetworks_defaults"],
+ srcs: [
+ "FlatbufferModelBuilder.cpp",
+ "NeuralNetworksV2.cpp",
+ "operation_converters/AddOperationConverter.cpp",
+ "operation_converters/ArithmeticOperationConverter.cpp",
+ "operation_converters/Conv2DOperationConverter.cpp",
+ "operation_converters/DepthwiseConv2DOperationConverter.cpp",
+ "operation_converters/LogisticOperationConverter.cpp",
+ "operation_converters/OperationConverterResolver.cpp",
+ "operation_converters/SubGraphContext.cpp",
+ ],
+
+ exclude_srcs: [
+ "NeuralNetworks.cpp",
+ ],
+
+ static_libs: [
+ "libtflite_static",
+ ],
+
+ include_dirs: [
+ "external/flatbuffers/include",
+ "external/tensorflow",
+ ],
+}
+
cc_library_shared {
name: "libneuralnetworks",
llndk: {
@@ -180,11 +209,11 @@ cc_library_shared {
"libneuralnetworks_defaults",
"neuralnetworks_defaults",
],
+ min_sdk_version: "30",
apex_available: [
"com.android.neuralnetworks",
"test_com.android.neuralnetworks",
],
-
stubs: {
versions: [
"30",
@@ -225,6 +254,24 @@ cc_library_static {
}
cc_library_static {
+ name: "libneuralnetworks_v2_static_experimental",
+ defaults: [
+ "libneuralnetworks_v2_defaults",
+ "neuralnetworks_defaults",
+ ],
+ exclude_static_libs: [
+ "libneuralnetworks_common",
+ "neuralnetworks_types",
+ "server_configurable_flags",
+ ],
+ static_libs: [
+ "libneuralnetworks_common_experimental",
+ "neuralnetworks_types_experimental",
+ ],
+ cflags: ["-DNN_EXPERIMENTAL_FEATURE"],
+}
+
+cc_library_static {
name: "libneuralnetworks_cl",
defaults: [
"neuralnetworks_cl_defaults",
@@ -285,6 +332,9 @@ ndk_library {
symbol_file: "libneuralnetworks.map.txt",
// Android O-MR1
first_version: "27",
+ export_header_libs: [
+ "libneuralnetworks_ndk_headers",
+ ],
}
genrule {
diff --git a/runtime/FlatbufferModelBuilder.cpp b/runtime/FlatbufferModelBuilder.cpp
new file mode 100644
index 000000000..be3faaa82
--- /dev/null
+++ b/runtime/FlatbufferModelBuilder.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FlatbufferModelBuilder"
+
+#include "FlatbufferModelBuilder.h"
+
+#include <LegacyUtils.h>
+
+#include "FlatbufferModelBuilderUtils.h"
+#include "operation_converters/OperationConverterResolver.h"
+
+namespace android {
+namespace nn {
+
+void FlatbufferModelBuilder::verifyModel(const tflite::Model* model) {
+ flatbuffers::Verifier verifier(mBuilder.GetBufferPointer(), mBuilder.GetSize());
+ CHECK(model != nullptr);
+ CHECK(model->Verify(verifier));
+}
+
+void FlatbufferModelBuilder::initializeBufferVector() {
+ mBufferVector.clear();
+
+ std::vector<uint8_t> emptyData;
+ auto emptyBuffer = tflite::CreateBufferDirect(mBuilder, &emptyData);
+ mBufferVector.push_back(emptyBuffer);
+}
+
+void FlatbufferModelBuilder::initializeOpCodeIndexForOperationType() {
+ mOpCodeIndexForOperationType.clear();
+ mOpCodeIndexForOperationType.resize(kNumberOfOperationTypes, -1);
+}
+
+std::vector<MetadataFlatbuffer> FlatbufferModelBuilder::createMetadataVector() {
+ std::vector<MetadataFlatbuffer> metadataVector;
+ for (uint32_t i = 0; i < mBufferVector.size(); i++) {
+ auto metadata = tflite::CreateMetadataDirect(mBuilder, std::to_string(i).c_str() /* name */,
+ i /* buffer */);
+ metadataVector.push_back(metadata);
+ }
+ return metadataVector;
+}
+
+Result<const tflite::Model*> FlatbufferModelBuilder::createTfliteModel() {
+ mModel = makeModel();
+
+ // Initialize and clear data structures
+ initializeBufferVector();
+ mOpCodesVector.clear();
+ initializeOpCodeIndexForOperationType();
+
+ // Generate subgraphs
+ auto subgraphsVector = NN_TRY(createSubGraphs());
+
+ auto metadataVector = createMetadataVector();
+
+ ModelFlatbuffer flatbufferModel = tflite::CreateModelDirect(
+ mBuilder, 3 /* version */, &mOpCodesVector /* operator_codes */,
+ &subgraphsVector /* subgraphs */, nullptr /* description */,
+ &mBufferVector /* buffers */, nullptr /* metadata_buffer */,
+ &metadataVector /* metadata */);
+ mBuilder.Finish(flatbufferModel);
+
+ const tflite::Model* tfliteModel = tflite::GetModel(mBuilder.GetBufferPointer());
+ verifyModel(tfliteModel);
+ return tfliteModel;
+}
+
+Result<SubGraphFlatbuffer> FlatbufferModelBuilder::createSubGraphFlatbuffer(
+ const Model::Subgraph& subgraph) {
+ // TFLite does not support unspecified ranks in Operands
+ NN_TRY(checkAllTensorOperandsHaveSpecifiedRank(subgraph.operands));
+ // TFLite does not support dynamic shapes for subgraph output Operands
+ NN_TRY(checkNoSubgraphOutputOperandsHaveDynamicShape(subgraph.operands));
+
+ SubGraphContext context(&mModel, &subgraph, &mBuilder, &mOpCodesVector,
+ &mOpCodeIndexForOperationType, &mBufferVector);
+ for (const Operation& operation : subgraph.operations) {
+ const IOperationConverter* converter =
+ OperationConverterResolver::get()->findOperationConverter(operation.type);
+ NN_RET_CHECK(converter != nullptr)
+ << "IOperationConverter not implemented for OperationType: " << operation.type;
+
+ NN_TRY(converter->convert(operation, &context));
+ }
+
+ for (uint32_t idx : subgraph.inputIndexes) {
+ context.addSubGraphInput(idx);
+ }
+ for (uint32_t idx : subgraph.outputIndexes) {
+ context.addSubGraphOutput(idx);
+ }
+
+ return context.finish();
+}
+
+Result<std::vector<SubGraphFlatbuffer>> FlatbufferModelBuilder::createSubGraphs() {
+ // We do not support control flow yet
+ NN_RET_CHECK(mModel.referenced.empty()) << "Control flow for multiple subgraphs not supported";
+
+ std::vector<SubGraphFlatbuffer> subGraphVector;
+
+ auto mainSubGraph = NN_TRY(createSubGraphFlatbuffer(mModel.main));
+ subGraphVector.push_back(mainSubGraph);
+
+ return subGraphVector;
+}
+
+} // namespace nn
+} // namespace android
diff --git a/runtime/FlatbufferModelBuilder.h b/runtime/FlatbufferModelBuilder.h
new file mode 100644
index 000000000..2356cc6bc
--- /dev/null
+++ b/runtime/FlatbufferModelBuilder.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_FLATBUFFER_MODEL_BUILDER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_FLATBUFFER_MODEL_BUILDER_H
+
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <utility>
+#include <vector>
+
+#include "FlatbufferModelBuilderUtils.h"
+#include "ModelBuilder.h"
+#include "NeuralNetworks.h"
+
+namespace android {
+namespace nn {
+
+class FlatbufferModelBuilder : public ModelBuilder {
+ public:
+ // Return generated TFLite Model if successful
+ Result<const tflite::Model*> createTfliteModel();
+
+ private:
+ void verifyModel(const tflite::Model* model);
+
+ // Clears mBufferVector and initializes the first Buffer to be an empty Buffer
+ // for Tensors that do not have a buffer.
+ void initializeBufferVector();
+ // Clears mOpCodeIndexForOperationType and initializes elements to be -1
+ void initializeOpCodeIndexForOperationType();
+
+ // Helper functions to convert Subgraphs
+ Result<SubGraphFlatbuffer> createSubGraphFlatbuffer(const Model::Subgraph& subgraph);
+ Result<std::vector<SubGraphFlatbuffer>> createSubGraphs();
+
+ // Generates metadata for each Buffer
+ // Must be called after mBufferVector is filled.
+ std::vector<MetadataFlatbuffer> createMetadataVector();
+
+ flatbuffers::FlatBufferBuilder mBuilder;
+ Model mModel;
+
+ std::vector<OperatorCodeFlatbuffer> mOpCodesVector;
+ std::vector<int> mOpCodeIndexForOperationType;
+ std::vector<BufferFlatbuffer> mBufferVector;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_FLATBUFFER_MODEL_BUILDER_H
diff --git a/runtime/FlatbufferModelBuilderUtils.h b/runtime/FlatbufferModelBuilderUtils.h
new file mode 100644
index 000000000..106d93155
--- /dev/null
+++ b/runtime/FlatbufferModelBuilderUtils.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_FLATBUFFER_MODEL_BUILDER_UTILS_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_FLATBUFFER_MODEL_BUILDER_UTILS_H
+
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "NeuralNetworks.h"
+#include "TypeManager.h"
+
+namespace android {
+namespace nn {
+
+using SubGraphFlatbuffer = flatbuffers::Offset<tflite::SubGraph>;
+using SubGraphsFlatbuffer = flatbuffers::Offset<flatbuffers::Vector<SubGraphFlatbuffer>>;
+
+using OperatorCodeFlatbuffer = flatbuffers::Offset<tflite::OperatorCode>;
+using OperatorFlatbuffer = flatbuffers::Offset<tflite::Operator>;
+using OperatorsFlatbuffer = flatbuffers::Offset<flatbuffers::Vector<OperatorFlatbuffer>>;
+
+using TensorFlatbuffer = flatbuffers::Offset<tflite::Tensor>;
+using TensorsFlatbuffer = flatbuffers::Offset<flatbuffers::Vector<TensorFlatbuffer>>;
+
+using BufferFlatbuffer = flatbuffers::Offset<tflite::Buffer>;
+
+using MetadataFlatbuffer = flatbuffers::Offset<tflite::Metadata>;
+
+using ModelFlatbuffer = flatbuffers::Offset<tflite::Model>;
+
+// Only supports tensor types
+// Will crash if passed in a scalar type
+inline Result<tflite::TensorType> getTensorFlatbufferOperandType(const OperandType& type) {
+ CHECK(TypeManager::get()->isTensorType(type));
+
+ // TODO: Map more operands
+ switch (type) {
+ case OperandType::TENSOR_FLOAT32:
+ return tflite::TensorType::TensorType_FLOAT32;
+ case OperandType::TENSOR_INT32:
+ return tflite::TensorType::TensorType_INT32;
+ case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ return tflite::TensorType::TensorType_INT8;
+ default:
+ NN_RET_CHECK_FAIL() << "OperandType not supported: " << type;
+ }
+}
+
+inline tflite::BuiltinOperator getFlatbufferOperator(const OperationType& type) {
+ // TODO: Add more operation types
+ switch (type) {
+ case OperationType::PAD:
+ return tflite::BuiltinOperator::BuiltinOperator_PAD;
+ case OperationType::CONV_2D:
+ return tflite::BuiltinOperator::BuiltinOperator_CONV_2D;
+ case OperationType::ADD:
+ return tflite::BuiltinOperator::BuiltinOperator_ADD;
+ case OperationType::DEPTHWISE_CONV_2D:
+ return tflite::BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D;
+ case OperationType::LOGISTIC:
+ return tflite::BuiltinOperator::BuiltinOperator_LOGISTIC;
+ default:
+ LOG(FATAL) << "OperationType not supported: " << type;
+ return {};
+ }
+}
+
+// Referenced from external/tensorflow/tensorflow/lite/tools/versioning/op_version.cc
+inline int32_t getMaxOperatorVersionCode(tflite::BuiltinOperator builtinCode) {
+ // TODO: Add more builtin_codes
+ switch (builtinCode) {
+ case tflite::BuiltinOperator::BuiltinOperator_CONV_2D:
+ return 5;
+ case tflite::BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
+ return 6;
+ case tflite::BuiltinOperator::BuiltinOperator_ADD:
+ return 4;
+ case tflite::BuiltinOperator::BuiltinOperator_PAD:
+ return 4;
+ case tflite::BuiltinOperator::BuiltinOperator_LOGISTIC:
+ return 3;
+ default:
+ LOG(FATAL) << "BuiltinOperator not supported: " << builtinCode;
+ return {};
+ }
+}
+
+inline Result<tflite::ActivationFunctionType> getTfliteActivation(FusedActivationFunc activation) {
+ switch (activation) {
+ case FusedActivationFunc::NONE:
+ return tflite::ActivationFunctionType::ActivationFunctionType_NONE;
+ case FusedActivationFunc::RELU:
+ return tflite::ActivationFunctionType::ActivationFunctionType_RELU;
+ case FusedActivationFunc::RELU1:
+ return tflite::ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1;
+ case FusedActivationFunc::RELU6:
+ return tflite::ActivationFunctionType::ActivationFunctionType_RELU6;
+ default:
+ NN_RET_CHECK_FAIL() << "FusedActivationFunc not supported: " << activation;
+ }
+}
+
+inline bool tensorOperandHasUnspecifiedRank(const Operand& operand) {
+ return TypeManager::get()->isTensorType(operand.type) && operand.dimensions.empty();
+}
+
+inline Result<void> checkAllTensorOperandsHaveSpecifiedRank(const std::vector<Operand>& operands) {
+ NN_RET_CHECK(std::none_of(operands.begin(), operands.end(), &tensorOperandHasUnspecifiedRank))
+ << "At least one Operand has unspecified rank";
+ return {};
+}
+
+inline bool subgraphOutputOperandHasDynamicShape(const Operand& operand) {
+ return operand.lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT &&
+ std::any_of(operand.dimensions.begin(), operand.dimensions.end(),
+ [](const uint32_t& dim) { return dim == 0; });
+}
+
+inline Result<void> checkNoSubgraphOutputOperandsHaveDynamicShape(
+ const std::vector<Operand>& operands) {
+ NN_RET_CHECK(
+ std::none_of(operands.begin(), operands.end(), &subgraphOutputOperandHasDynamicShape))
+ << "At least one subgraph output Operand has dynamic shape";
+ return {};
+}
+
+inline bool isOperandConstant(const Operand& operand) {
+ return operand.lifetime == Operand::LifeTime::CONSTANT_COPY ||
+ operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE;
+}
+
+inline tflite::Padding getTFLitePadding(int32_t paddingType) {
+ switch (paddingType) {
+ case ANEURALNETWORKS_PADDING_VALID: // VALID
+ case 0:
+ return tflite::Padding::Padding_VALID;
+ case ANEURALNETWORKS_PADDING_SAME: // SAME
+ return tflite::Padding::Padding_SAME;
+ default:
+ LOG(FATAL) << "Unsupported NNAPI NDK padding type: " << paddingType;
+ return {};
+ }
+}
+
+// Replace all 0 dimensions to -1 since TFLite only supports -1 as an unknown dimension
+inline void replaceZeroDimensions(std::vector<int32_t>* dims) {
+ std::replace(dims->begin(), dims->end(), 0, -1);
+}
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_FLATBUFFER_MODEL_BUILDER_UTILS_H
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 28b7a69dd..127a4521d 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -716,6 +716,29 @@ int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t*
return ANEURALNETWORKS_NO_ERROR;
}
+#ifdef NN_DEBUGGABLE
+static int64_t sRuntimeFeatureLevel = 0;
+void forTest_setRuntimeFeatureLevel(int64_t level) {
+ sRuntimeFeatureLevel = level;
+}
+#endif
+
+// Since ANeuralNetworks_getRuntimeFeatureLevel is new in 31 while libneuralnetwork targets
+// "min_sdk_version: 30", calling it should be properly guarded (e.g. __builtin_available).
+// But calling it within the same compilation unit is perfectly fine. Guarding it doesn't
+// make any sense and is simply wrong. (It's available on a system where __builtin_available(30)
+// evaluates to false.)
+// To make the compiler happy we introduce getRuntimeFeatureLevelImpl() and call it within the
+// library.
+static inline int64_t getRuntimeFeatureLevelImpl() {
+#ifdef NN_DEBUGGABLE
+ if (sRuntimeFeatureLevel) {
+ return sRuntimeFeatureLevel;
+ }
+#endif
+ return DeviceManager::get()->getRuntimeFeatureLevel();
+}
+
int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
int64_t* featureLevel) {
if (device == nullptr || featureLevel == nullptr) {
@@ -727,7 +750,7 @@ int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
if (dFeatureLevel < 0) {
return ANEURALNETWORKS_BAD_STATE;
}
- *featureLevel = std::min(ANeuralNetworks_getRuntimeFeatureLevel(), dFeatureLevel);
+ *featureLevel = std::min(getRuntimeFeatureLevelImpl(), dFeatureLevel);
return ANEURALNETWORKS_NO_ERROR;
}
@@ -1661,20 +1684,8 @@ int ANeuralNetworksExecution_startComputeWithDependencies(
return n;
}
-#ifdef NN_DEBUGGABLE
-static int64_t sRuntimeFeatureLevel = 0;
-void forTest_setRuntimeFeatureLevel(int64_t level) {
- sRuntimeFeatureLevel = level;
-}
-#endif
-
int64_t ANeuralNetworks_getRuntimeFeatureLevel() {
-#ifdef NN_DEBUGGABLE
- if (sRuntimeFeatureLevel) {
- return sRuntimeFeatureLevel;
- }
-#endif
- return DeviceManager::get()->getRuntimeFeatureLevel();
+ return getRuntimeFeatureLevelImpl();
}
int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution,
diff --git a/runtime/NeuralNetworksV2.cpp b/runtime/NeuralNetworksV2.cpp
new file mode 100644
index 000000000..21049946b
--- /dev/null
+++ b/runtime/NeuralNetworksV2.cpp
@@ -0,0 +1,1674 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Contains all the entry points to the C Neural Networks API.
+// We do basic validation of the operands and then call the class
+// that implements the functionality.
+
+#define LOG_TAG "NeuralNetworks"
+
+#include <ControlFlow.h>
+#include <LegacyUtils.h>
+#include <MetaModel.h>
+#include <Tracing.h>
+#include <nnapi/Types.h>
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "BurstBuilder.h"
+#include "CompilationBuilder.h"
+#include "Event.h"
+#include "ExecutionBuilder.h"
+#include "ExecutionCallback.h"
+#include "FlatbufferModelBuilder.h"
+#include "Manager.h"
+#include "Memory.h"
+#include "NeuralNetworks.h"
+#include "NeuralNetworksExtensions.h"
+#include "NeuralNetworksOEM.h"
+#include "Telemetry.h"
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#pragma clang diagnostic pop
+
+using namespace android::nn;
+
+// Make sure the constants defined in the header files have not changed values.
+// IMPORTANT: When adding new values, update kNumberOfDataTypes or kNumberOfDataTypesOEM
+// in Utils.h.
+static_assert(ANEURALNETWORKS_FLOAT32 == 0, "ANEURALNETWORKS_FLOAT32 has changed");
+static_assert(ANEURALNETWORKS_INT32 == 1, "ANEURALNETWORKS_INT32 has changed");
+static_assert(ANEURALNETWORKS_UINT32 == 2, "ANEURALNETWORKS_UINT32 has changed");
+static_assert(ANEURALNETWORKS_TENSOR_FLOAT32 == 3, "ANEURALNETWORKS_TENSOR_FLOAT32 has changed");
+static_assert(ANEURALNETWORKS_TENSOR_INT32 == 4, "ANEURALNETWORKS_TENSOR_INT32 has changed");
+static_assert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM == 5,
+ "ANEURALNETWORKS_TENSOR_QUANT8_ASYMM has changed");
+static_assert(ANEURALNETWORKS_BOOL == 6, "ANEURALNETWORKS_BOOL has changed");
+static_assert(ANEURALNETWORKS_TENSOR_QUANT16_SYMM == 7,
+ "ANEURALNETWORKS_TENSOR_QUANT16_SYMM has changed");
+static_assert(ANEURALNETWORKS_TENSOR_FLOAT16 == 8, "ANEURALNETWORKS_TENSOR_FLOAT16 has changed");
+static_assert(ANEURALNETWORKS_TENSOR_BOOL8 == 9, "ANEURALNETWORKS_TENSOR_BOOL8 has changed");
+static_assert(ANEURALNETWORKS_FLOAT16 == 10, "ANEURALNETWORKS_FLOAT16 has changed");
+static_assert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL == 11,
+ "ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL has changed");
+static_assert(ANEURALNETWORKS_TENSOR_QUANT16_ASYMM == 12,
+ "ANEURALNETWORKS_TENSOR_QUANT16_ASYMM has changed");
+static_assert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM == 13,
+ "ANEURALNETWORKS_TENSOR_QUANT8_SYMM has changed");
+static_assert(ANEURALNETWORKS_OEM_SCALAR == 10000, "ANEURALNETWORKS_OEM_SCALAR has changed");
+static_assert(ANEURALNETWORKS_TENSOR_OEM_BYTE == 10001,
+ "ANEURALNETWORKS_TENSOR_OEM_BYTE has changed");
+
+// IMPORTANT: When adding new values, update kNumberOfOperationTypes or
+// kNumberOfOperationTypesOEM in Utils.h.
+static_assert(ANEURALNETWORKS_ADD == 0, "ANEURALNETWORKS_ADD has changed");
+static_assert(ANEURALNETWORKS_AVERAGE_POOL_2D == 1, "ANEURALNETWORKS_AVERAGE_POOL_2D has changed");
+static_assert(ANEURALNETWORKS_CONCATENATION == 2, "ANEURALNETWORKS_CONCATENATION has changed");
+static_assert(ANEURALNETWORKS_CONV_2D == 3, "ANEURALNETWORKS_CONV_2D has changed");
+static_assert(ANEURALNETWORKS_DEPTHWISE_CONV_2D == 4,
+              "ANEURALNETWORKS_DEPTHWISE_CONV_2D has changed");
+static_assert(ANEURALNETWORKS_DEPTH_TO_SPACE == 5, "ANEURALNETWORKS_DEPTH_TO_SPACE has changed");
+static_assert(ANEURALNETWORKS_DEQUANTIZE == 6, "ANEURALNETWORKS_DEQUANTIZE has changed");
+static_assert(ANEURALNETWORKS_EMBEDDING_LOOKUP == 7,
+              "ANEURALNETWORKS_EMBEDDING_LOOKUP has changed");
+static_assert(ANEURALNETWORKS_FLOOR == 8, "ANEURALNETWORKS_FLOOR has changed");
+static_assert(ANEURALNETWORKS_FULLY_CONNECTED == 9, "ANEURALNETWORKS_FULLY_CONNECTED has changed");
+static_assert(ANEURALNETWORKS_HASHTABLE_LOOKUP == 10,
+              "ANEURALNETWORKS_HASHTABLE_LOOKUP has changed");
+static_assert(ANEURALNETWORKS_L2_NORMALIZATION == 11,
+              "ANEURALNETWORKS_L2_NORMALIZATION has changed");
+static_assert(ANEURALNETWORKS_L2_POOL_2D == 12, "ANEURALNETWORKS_L2_POOL_2D has changed");
+static_assert(ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION == 13,
+              "ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION has changed");
+static_assert(ANEURALNETWORKS_LOGISTIC == 14, "ANEURALNETWORKS_LOGISTIC has changed");
+static_assert(ANEURALNETWORKS_LSH_PROJECTION == 15, "ANEURALNETWORKS_LSH_PROJECTION has changed");
+static_assert(ANEURALNETWORKS_LSTM == 16, "ANEURALNETWORKS_LSTM has changed");
+static_assert(ANEURALNETWORKS_MAX_POOL_2D == 17, "ANEURALNETWORKS_MAX_POOL_2D has changed");
+static_assert(ANEURALNETWORKS_MUL == 18, "ANEURALNETWORKS_MUL has changed");
+static_assert(ANEURALNETWORKS_RELU == 19, "ANEURALNETWORKS_RELU has changed");
+static_assert(ANEURALNETWORKS_RELU1 == 20, "ANEURALNETWORKS_RELU1 has changed");
+static_assert(ANEURALNETWORKS_RELU6 == 21, "ANEURALNETWORKS_RELU6 has changed");
+static_assert(ANEURALNETWORKS_RESHAPE == 22, "ANEURALNETWORKS_RESHAPE has changed");
+static_assert(ANEURALNETWORKS_RESIZE_BILINEAR == 23, "ANEURALNETWORKS_RESIZE_BILINEAR has changed");
+static_assert(ANEURALNETWORKS_RNN == 24, "ANEURALNETWORKS_RNN has changed");
+static_assert(ANEURALNETWORKS_SOFTMAX == 25, "ANEURALNETWORKS_SOFTMAX has changed");
+static_assert(ANEURALNETWORKS_SPACE_TO_DEPTH == 26, "ANEURALNETWORKS_SPACE_TO_DEPTH has changed");
+static_assert(ANEURALNETWORKS_SVDF == 27, "ANEURALNETWORKS_SVDF has changed");
+static_assert(ANEURALNETWORKS_TANH == 28, "ANEURALNETWORKS_TANH has changed");
+
+static_assert(ANEURALNETWORKS_BATCH_TO_SPACE_ND == 29,
+              "ANEURALNETWORKS_BATCH_TO_SPACE_ND has changed");
+static_assert(ANEURALNETWORKS_DIV == 30, "ANEURALNETWORKS_DIV has changed");
+static_assert(ANEURALNETWORKS_MEAN == 31, "ANEURALNETWORKS_MEAN has changed");
+static_assert(ANEURALNETWORKS_PAD == 32, "ANEURALNETWORKS_PAD has changed");
+static_assert(ANEURALNETWORKS_SPACE_TO_BATCH_ND == 33,
+              "ANEURALNETWORKS_SPACE_TO_BATCH_ND has changed");
+static_assert(ANEURALNETWORKS_SQUEEZE == 34, "ANEURALNETWORKS_SQUEEZE has changed");
+static_assert(ANEURALNETWORKS_STRIDED_SLICE == 35, "ANEURALNETWORKS_STRIDED_SLICE has changed");
+static_assert(ANEURALNETWORKS_SUB == 36, "ANEURALNETWORKS_SUB has changed");
+static_assert(ANEURALNETWORKS_TRANSPOSE == 37, "ANEURALNETWORKS_TRANSPOSE has changed");
+
+static_assert(ANEURALNETWORKS_ABS == 38, "ANEURALNETWORKS_ABS has changed");
+static_assert(ANEURALNETWORKS_ARGMAX == 39, "ANEURALNETWORKS_ARGMAX has changed");
+static_assert(ANEURALNETWORKS_ARGMIN == 40, "ANEURALNETWORKS_ARGMIN has changed");
+static_assert(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM == 41,
+ "ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM has changed");
+static_assert(ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM == 42,
+ "ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM has changed");
+static_assert(ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN == 43,
+ "ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN has changed");
+static_assert(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT == 44,
+ "ANEURALNETWORKS_BOX_WITH_NMS_LIMIT has changed");
+static_assert(ANEURALNETWORKS_CAST == 45, "ANEURALNETWORKS_CAST has changed");
+static_assert(ANEURALNETWORKS_CHANNEL_SHUFFLE == 46, "ANEURALNETWORKS_CHANNEL_SHUFFLE has changed");
+static_assert(ANEURALNETWORKS_DETECTION_POSTPROCESSING == 47,
+ "ANEURALNETWORKS_DETECTION_POSTPROCESSING has changed");
+static_assert(ANEURALNETWORKS_EQUAL == 48, "ANEURALNETWORKS_EQUAL has changed");
+static_assert(ANEURALNETWORKS_EXP == 49, "ANEURALNETWORKS_EXP has changed");
+static_assert(ANEURALNETWORKS_EXPAND_DIMS == 50, "ANEURALNETWORKS_EXPAND_DIMS has changed");
+static_assert(ANEURALNETWORKS_GATHER == 51, "ANEURALNETWORKS_GATHER has changed");
+static_assert(ANEURALNETWORKS_GENERATE_PROPOSALS == 52,
+ "ANEURALNETWORKS_GENERATE_PROPOSALS has changed");
+static_assert(ANEURALNETWORKS_GREATER == 53, "ANEURALNETWORKS_GREATER has changed");
+static_assert(ANEURALNETWORKS_GREATER_EQUAL == 54, "ANEURALNETWORKS_GREATER_EQUAL has changed");
+static_assert(ANEURALNETWORKS_GROUPED_CONV_2D == 55, "ANEURALNETWORKS_GROUPED_CONV_2D has changed");
+static_assert(ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT == 56,
+ "ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT has changed");
+static_assert(ANEURALNETWORKS_INSTANCE_NORMALIZATION == 57,
+ "ANEURALNETWORKS_INSTANCE_NORMALIZATION has changed");
+static_assert(ANEURALNETWORKS_LESS == 58, "ANEURALNETWORKS_LESS has changed");
+static_assert(ANEURALNETWORKS_LESS_EQUAL == 59, "ANEURALNETWORKS_LESS_EQUAL has changed");
+static_assert(ANEURALNETWORKS_LOG == 60, "ANEURALNETWORKS_LOG has changed");
+static_assert(ANEURALNETWORKS_LOGICAL_AND == 61, "ANEURALNETWORKS_LOGICAL_AND has changed");
+static_assert(ANEURALNETWORKS_LOGICAL_NOT == 62, "ANEURALNETWORKS_LOGICAL_NOT has changed");
+static_assert(ANEURALNETWORKS_LOGICAL_OR == 63, "ANEURALNETWORKS_LOGICAL_OR has changed");
+static_assert(ANEURALNETWORKS_LOG_SOFTMAX == 64, "ANEURALNETWORKS_LOG_SOFTMAX has changed");
+static_assert(ANEURALNETWORKS_MAXIMUM == 65, "ANEURALNETWORKS_MAXIMUM has changed");
+static_assert(ANEURALNETWORKS_MINIMUM == 66, "ANEURALNETWORKS_MINIMUM has changed");
+static_assert(ANEURALNETWORKS_NEG == 67, "ANEURALNETWORKS_NEG has changed");
+static_assert(ANEURALNETWORKS_NOT_EQUAL == 68, "ANEURALNETWORKS_NOT_EQUAL has changed");
+static_assert(ANEURALNETWORKS_PAD_V2 == 69, "ANEURALNETWORKS_PAD_V2 has changed");
+static_assert(ANEURALNETWORKS_POW == 70, "ANEURALNETWORKS_POW has changed");
+static_assert(ANEURALNETWORKS_PRELU == 71, "ANEURALNETWORKS_PRELU has changed");
+static_assert(ANEURALNETWORKS_QUANTIZE == 72, "ANEURALNETWORKS_QUANTIZE has changed");
+static_assert(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM == 73,
+ "ANEURALNETWORKS_QUANTIZED_16BIT_LSTM has changed");
+static_assert(ANEURALNETWORKS_RANDOM_MULTINOMIAL == 74,
+ "ANEURALNETWORKS_RANDOM_MULTINOMIAL has changed");
+static_assert(ANEURALNETWORKS_REDUCE_ALL == 75, "ANEURALNETWORKS_REDUCE_ALL has changed");
+static_assert(ANEURALNETWORKS_REDUCE_ANY == 76, "ANEURALNETWORKS_REDUCE_ANY has changed");
+static_assert(ANEURALNETWORKS_REDUCE_MAX == 77, "ANEURALNETWORKS_REDUCE_MAX has changed");
+static_assert(ANEURALNETWORKS_REDUCE_MIN == 78, "ANEURALNETWORKS_REDUCE_MIN has changed");
+static_assert(ANEURALNETWORKS_REDUCE_PROD == 79, "ANEURALNETWORKS_REDUCE_PROD has changed");
+static_assert(ANEURALNETWORKS_REDUCE_SUM == 80, "ANEURALNETWORKS_REDUCE_SUM has changed");
+static_assert(ANEURALNETWORKS_ROI_ALIGN == 81, "ANEURALNETWORKS_ROI_ALIGN has changed");
+static_assert(ANEURALNETWORKS_ROI_POOLING == 82, "ANEURALNETWORKS_ROI_POOLING has changed");
+static_assert(ANEURALNETWORKS_RSQRT == 83, "ANEURALNETWORKS_RSQRT has changed");
+static_assert(ANEURALNETWORKS_SELECT == 84, "ANEURALNETWORKS_SELECT has changed");
+static_assert(ANEURALNETWORKS_SIN == 85, "ANEURALNETWORKS_SIN has changed");
+static_assert(ANEURALNETWORKS_SLICE == 86, "ANEURALNETWORKS_SLICE has changed");
+static_assert(ANEURALNETWORKS_SPLIT == 87, "ANEURALNETWORKS_SPLIT has changed");
+static_assert(ANEURALNETWORKS_SQRT == 88, "ANEURALNETWORKS_SQRT has changed");
+static_assert(ANEURALNETWORKS_TILE == 89, "ANEURALNETWORKS_TILE has changed");
+static_assert(ANEURALNETWORKS_TOPK_V2 == 90, "ANEURALNETWORKS_TOPK_V2 has changed");
+static_assert(ANEURALNETWORKS_TRANSPOSE_CONV_2D == 91,
+ "ANEURALNETWORKS_TRANSPOSE_CONV_2D has changed");
+static_assert(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM == 92,
+ "ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM has changed");
+static_assert(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN == 93,
+ "ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN has changed");
+static_assert(ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR == 94,
+ "ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR has changed");
+static_assert(ANEURALNETWORKS_QUANTIZED_LSTM == 95, "ANEURALNETWORKS_QUANTIZED_LSTM has changed");
+static_assert(ANEURALNETWORKS_IF == 96, "ANEURALNETWORKS_IF has changed");
+static_assert(ANEURALNETWORKS_WHILE == 97, "ANEURALNETWORKS_WHILE has changed");
+static_assert(ANEURALNETWORKS_ELU == 98, "ANEURALNETWORKS_ELU has changed");
+static_assert(ANEURALNETWORKS_HARD_SWISH == 99, "ANEURALNETWORKS_HARD_SWISH has changed");
+static_assert(ANEURALNETWORKS_FILL == 100, "ANEURALNETWORKS_FILL has changed");
+static_assert(ANEURALNETWORKS_RANK == 101, "ANEURALNETWORKS_RANK has changed");
+static_assert(ANEURALNETWORKS_BATCH_MATMUL == 102, "ANEURALNETWORKS_BATCH_MATMUL has changed");
+static_assert(ANEURALNETWORKS_PACK == 103, "ANEURALNETWORKS_PACK has changed");
+static_assert(ANEURALNETWORKS_MIRROR_PAD == 104, "ANEURALNETWORKS_MIRROR_PAD has changed");
+static_assert(ANEURALNETWORKS_REVERSE == 105, "ANEURALNETWORKS_REVERSE has changed");
+static_assert(ANEURALNETWORKS_OEM_OPERATION == 10000, "ANEURALNETWORKS_OEM_OPERATION has changed");
+
+static_assert(ANEURALNETWORKS_FUSED_NONE == 0, "ANEURALNETWORKS_FUSED_NONE has changed");
+static_assert(ANEURALNETWORKS_FUSED_RELU == 1, "ANEURALNETWORKS_FUSED_RELU has changed");
+static_assert(ANEURALNETWORKS_FUSED_RELU1 == 2, "ANEURALNETWORKS_FUSED_RELU1 has changed");
+static_assert(ANEURALNETWORKS_FUSED_RELU6 == 3, "ANEURALNETWORKS_FUSED_RELU6 has changed");
+
+static_assert(ANEURALNETWORKS_PREFER_LOW_POWER == 0,
+ "ANEURALNETWORKS_PREFER_LOW_POWER has changed");
+static_assert(ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER == 1,
+ "ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER has changed");
+static_assert(ANEURALNETWORKS_PREFER_SUSTAINED_SPEED == 2,
+ "ANEURALNETWORKS_PREFER_SUSTAINED_SPEED has changed");
+
+static_assert(ANEURALNETWORKS_NO_ERROR == 0, "ANEURALNETWORKS_NO_ERROR has changed");
+static_assert(ANEURALNETWORKS_OUT_OF_MEMORY == 1, "ANEURALNETWORKS_OUT_OF_MEMORY has changed");
+static_assert(ANEURALNETWORKS_INCOMPLETE == 2, "ANEURALNETWORKS_INCOMPLETE has changed");
+static_assert(ANEURALNETWORKS_UNEXPECTED_NULL == 3, "ANEURALNETWORKS_UNEXPECTED_NULL has changed");
+static_assert(ANEURALNETWORKS_BAD_DATA == 4, "ANEURALNETWORKS_BAD_DATA has changed");
+static_assert(ANEURALNETWORKS_OP_FAILED == 5, "ANEURALNETWORKS_OP_FAILED has changed");
+static_assert(ANEURALNETWORKS_BAD_STATE == 6, "ANEURALNETWORKS_BAD_STATE has changed");
+static_assert(ANEURALNETWORKS_UNMAPPABLE == 7, "ANEURALNETWORKS_UNMAPPABLE has changed");
+static_assert(ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE == 8,
+ "ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE has changed");
+static_assert(ANEURALNETWORKS_UNAVAILABLE_DEVICE == 9,
+ "ANEURALNETWORKS_UNAVAILABLE_DEVICE has changed");
+static_assert(ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT == 10,
+ "ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT has changed");
+static_assert(ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT == 11,
+ "ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT has changed");
+static_assert(ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT == 12,
+ "ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT has changed");
+static_assert(ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT == 13,
+ "ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT has changed");
+static_assert(ANEURALNETWORKS_DEAD_OBJECT == 14, "ANEURALNETWORKS_DEAD_OBJECT has changed");
+
+static_assert(ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES == 128,
+ "ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES has changed");
+
+static_assert(ANEURALNETWORKS_DEVICE_UNKNOWN == 0, "ANEURALNETWORKS_DEVICE_UNKNOWN has changed");
+static_assert(ANEURALNETWORKS_DEVICE_OTHER == 1, "ANEURALNETWORKS_DEVICE_OTHER has changed");
+static_assert(ANEURALNETWORKS_DEVICE_CPU == 2, "ANEURALNETWORKS_DEVICE_CPU has changed");
+static_assert(ANEURALNETWORKS_DEVICE_GPU == 3, "ANEURALNETWORKS_DEVICE_GPU has changed");
+static_assert(ANEURALNETWORKS_DEVICE_ACCELERATOR == 4,
+ "ANEURALNETWORKS_DEVICE_ACCELERATOR has changed");
+
+static_assert(ANEURALNETWORKS_DURATION_ON_HARDWARE == 0,
+ "ANEURALNETWORKS_DURATION_ON_HARDWARE has changed");
+static_assert(ANEURALNETWORKS_DURATION_IN_DRIVER == 1,
+ "ANEURALNETWORKS_DURATION_IN_DRIVER has changed");
+static_assert(ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE == 2,
+ "ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE has changed");
+static_assert(ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER == 3,
+ "ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER has changed");
+
+// Make sure that the constants are compatible with the values defined in
+// hardware/interfaces/neuralnetworks/1.0/types.hal.
+static_assert(static_cast<int32_t>(OperandType::OEM) == ANEURALNETWORKS_OEM_SCALAR,
+              "OEM != ANEURALNETWORKS_OEM_SCALAR");
+static_assert(static_cast<int32_t>(OperandType::FLOAT32) == ANEURALNETWORKS_FLOAT32,
+              "FLOAT32 != ANEURALNETWORKS_FLOAT32");
+static_assert(static_cast<int32_t>(OperandType::INT32) == ANEURALNETWORKS_INT32,
+              "INT32 != ANEURALNETWORKS_INT32");
+static_assert(static_cast<int32_t>(OperandType::UINT32) == ANEURALNETWORKS_UINT32,
+              "UINT32 != ANEURALNETWORKS_UINT32");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) == ANEURALNETWORKS_TENSOR_OEM_BYTE,
+              "TENSOR_OEM_BYTE != ANEURALNETWORKS_TENSOR_OEM_BYTE");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_FLOAT32) == ANEURALNETWORKS_TENSOR_FLOAT32,
+              "TENSOR_FLOAT32 != ANEURALNETWORKS_TENSOR_FLOAT32");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) ==
+                      ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+              "TENSOR_QUANT8_ASYMM != ANEURALNETWORKS_TENSOR_QUANT8_ASYMM");
+
+static_assert(static_cast<int32_t>(OperationType::ADD) == ANEURALNETWORKS_ADD,
+ "OperationType::ADD != ANEURALNETWORKS_ADD");
+static_assert(static_cast<int32_t>(OperationType::AVERAGE_POOL_2D) ==
+ ANEURALNETWORKS_AVERAGE_POOL_2D,
+ "OperationType::AVERAGE_POOL_2D != ANEURALNETWORKS_AVERAGE_POOL_2D");
+static_assert(static_cast<int32_t>(OperationType::CONV_2D) == ANEURALNETWORKS_CONV_2D,
+ "OperationType::CONV_2D != ANEURALNETWORKS_CONV_2D");
+static_assert(static_cast<int32_t>(OperationType::DEPTHWISE_CONV_2D) ==
+ ANEURALNETWORKS_DEPTHWISE_CONV_2D,
+ "OperationType::DEPTHWISE_CONV_2D != ANEURALNETWORKS_DEPTHWISE_CONV_2D");
+static_assert(static_cast<int32_t>(OperationType::DEPTH_TO_SPACE) == ANEURALNETWORKS_DEPTH_TO_SPACE,
+ "OperationType::DEPTH_TO_SPACE != ANEURALNETWORKS_DEPTH_TO_SPACE");
+static_assert(static_cast<int32_t>(OperationType::DEQUANTIZE) == ANEURALNETWORKS_DEQUANTIZE,
+ "OperationType::DEQUANTIZE != ANEURALNETWORKS_DEQUANTIZE");
+static_assert(static_cast<int32_t>(OperationType::EMBEDDING_LOOKUP) ==
+ ANEURALNETWORKS_EMBEDDING_LOOKUP,
+ "OperationType::EMBEDDING_LOOKUP != ANEURALNETWORKS_EMBEDDING_LOOKUP");
+static_assert(static_cast<int32_t>(OperationType::FLOOR) == ANEURALNETWORKS_FLOOR,
+ "OperationType::FLOOR != ANEURALNETWORKS_FLOOR");
+static_assert(static_cast<int32_t>(OperationType::FULLY_CONNECTED) ==
+ ANEURALNETWORKS_FULLY_CONNECTED,
+ "OperationType::FULLY_CONNECTED != ANEURALNETWORKS_FULLY_CONNECTED");
+static_assert(static_cast<int32_t>(OperationType::HASHTABLE_LOOKUP) ==
+ ANEURALNETWORKS_HASHTABLE_LOOKUP,
+ "OperationType::HASHTABLE_LOOKUP != ANEURALNETWORKS_HASHTABLE_LOOKUP");
+static_assert(static_cast<int32_t>(OperationType::L2_NORMALIZATION) ==
+ ANEURALNETWORKS_L2_NORMALIZATION,
+ "OperationType::L2_NORMALIZATION != ANEURALNETWORKS_L2_NORMALIZATION");
+static_assert(static_cast<int32_t>(OperationType::L2_POOL_2D) == ANEURALNETWORKS_L2_POOL_2D,
+ "OperationType::L2_POOL_2D != ANEURALNETWORKS_L2_POOL_2D");
+static_assert(static_cast<int32_t>(OperationType::LOCAL_RESPONSE_NORMALIZATION) ==
+ ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
+ "OperationType::LOCAL_RESPONSE_NORMALIZATION != "
+ "ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION");
+static_assert(static_cast<int32_t>(OperationType::LOGISTIC) == ANEURALNETWORKS_LOGISTIC,
+ "OperationType::LOGISTIC != ANEURALNETWORKS_LOGISTIC");
+static_assert(static_cast<int32_t>(OperationType::LSH_PROJECTION) == ANEURALNETWORKS_LSH_PROJECTION,
+ "OperationType::LSH_PROJECTION != ANEURALNETWORKS_LSH_PROJECTION");
+static_assert(static_cast<int32_t>(OperationType::LSTM) == ANEURALNETWORKS_LSTM,
+ "OperationType::LSTM != ANEURALNETWORKS_LSTM");
+static_assert(static_cast<int32_t>(OperationType::MAX_POOL_2D) == ANEURALNETWORKS_MAX_POOL_2D,
+ "OperationType::MAX_POOL_2D != ANEURALNETWORKS_MAX_POOL_2D");
+static_assert(static_cast<int32_t>(OperationType::MUL) == ANEURALNETWORKS_MUL,
+ "OperationType::MUL != ANEURALNETWORKS_MUL");
+static_assert(static_cast<int32_t>(OperationType::RELU) == ANEURALNETWORKS_RELU,
+ "OperationType::RELU != ANEURALNETWORKS_RELU");
+static_assert(static_cast<int32_t>(OperationType::RELU1) == ANEURALNETWORKS_RELU1,
+ "OperationType::RELU1 != ANEURALNETWORKS_RELU1");
+static_assert(static_cast<int32_t>(OperationType::RELU6) == ANEURALNETWORKS_RELU6,
+ "OperationType::RELU6 != ANEURALNETWORKS_RELU6");
+static_assert(static_cast<int32_t>(OperationType::RESHAPE) == ANEURALNETWORKS_RESHAPE,
+ "OperationType::RESHAPE != ANEURALNETWORKS_RESHAPE");
+static_assert(static_cast<int32_t>(OperationType::RESIZE_BILINEAR) ==
+ ANEURALNETWORKS_RESIZE_BILINEAR,
+ "OperationType::RESIZE_BILINEAR != ANEURALNETWORKS_RESIZE_BILINEAR");
+static_assert(static_cast<int32_t>(OperationType::RNN) == ANEURALNETWORKS_RNN,
+ "OperationType::RNN != ANEURALNETWORKS_RNN");
+static_assert(static_cast<int32_t>(OperationType::SOFTMAX) == ANEURALNETWORKS_SOFTMAX,
+ "OperationType::SOFTMAX != ANEURALNETWORKS_SOFTMAX");
+static_assert(static_cast<int32_t>(OperationType::SPACE_TO_DEPTH) == ANEURALNETWORKS_SPACE_TO_DEPTH,
+ "OperationType::SPACE_TO_DEPTH != ANEURALNETWORKS_SPACE_TO_DEPTH");
+static_assert(static_cast<int32_t>(OperationType::SVDF) == ANEURALNETWORKS_SVDF,
+ "OperationType::SVDF != ANEURALNETWORKS_SVDF");
+static_assert(static_cast<int32_t>(OperationType::TANH) == ANEURALNETWORKS_TANH,
+ "OperationType::TANH != ANEURALNETWORKS_TANH");
+
+static_assert(static_cast<int32_t>(FusedActivationFunc::NONE) == ANEURALNETWORKS_FUSED_NONE,
+ "FusedActivationFunc::NONE != ANEURALNETWORKS_FUSED_NONE");
+static_assert(static_cast<int32_t>(FusedActivationFunc::RELU) == ANEURALNETWORKS_FUSED_RELU,
+ "FusedActivationFunc::RELU != ANEURALNETWORKS_FUSED_RELU");
+static_assert(static_cast<int32_t>(FusedActivationFunc::RELU1) == ANEURALNETWORKS_FUSED_RELU1,
+ "FusedActivationFunc::RELU1 != ANEURALNETWORKS_FUSED_RELU1");
+static_assert(static_cast<int32_t>(FusedActivationFunc::RELU6) == ANEURALNETWORKS_FUSED_RELU6,
+ "FusedActivationFunc::RELU6 != ANEURALNETWORKS_FUSED_RELU6");
+
+// Make sure that the constants are compatible with the values defined in
+// hardware/interfaces/neuralnetworks/1.1/types.hal.
+static_assert(static_cast<int32_t>(OperationType::BATCH_TO_SPACE_ND) ==
+ ANEURALNETWORKS_BATCH_TO_SPACE_ND,
+ "OperationType::BATCH_TO_SPACE_ND != ANEURALNETWORKS_BATCH_TO_SPACE_ND");
+static_assert(static_cast<int32_t>(OperationType::DIV) == ANEURALNETWORKS_DIV,
+ "OperationType::DIV != ANEURALNETWORKS_DIV");
+static_assert(static_cast<int32_t>(OperationType::MEAN) == ANEURALNETWORKS_MEAN,
+ "OperationType::MEAN != ANEURALNETWORKS_MEAN");
+static_assert(static_cast<int32_t>(OperationType::PAD) == ANEURALNETWORKS_PAD,
+ "OperationType::PAD != ANEURALNETWORKS_PAD");
+static_assert(static_cast<int32_t>(OperationType::SPACE_TO_BATCH_ND) ==
+ ANEURALNETWORKS_SPACE_TO_BATCH_ND,
+ "OperationType::SPACE_TO_BATCH_ND != ANEURALNETWORKS_SPACE_TO_BATCH_ND");
+static_assert(static_cast<int32_t>(OperationType::SQUEEZE) == ANEURALNETWORKS_SQUEEZE,
+ "OperationType::SQUEEZE != ANEURALNETWORKS_SQUEEZE");
+static_assert(static_cast<int32_t>(OperationType::STRIDED_SLICE) == ANEURALNETWORKS_STRIDED_SLICE,
+ "OperationType::STRIDED_SLICE != ANEURALNETWORKS_STRIDED_SLICE");
+static_assert(static_cast<int32_t>(OperationType::SUB) == ANEURALNETWORKS_SUB,
+ "OperationType::SUB != ANEURALNETWORKS_SUB");
+static_assert(static_cast<int32_t>(OperationType::TRANSPOSE) == ANEURALNETWORKS_TRANSPOSE,
+ "OperationType::TRANSPOSE != ANEURALNETWORKS_TRANSPOSE");
+
+// Make sure that the constants are compatible with the values defined in
+// hardware/interfaces/neuralnetworks/1.2/types.hal.
+static_assert(static_cast<int32_t>(OperandType::BOOL) == ANEURALNETWORKS_BOOL,
+ "BOOL != ANEURALNETWORKS_BOOL");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_QUANT16_SYMM) ==
+ ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
+ "TENSOR_QUANT16_SYMM != ANEURALNETWORKS_TENSOR_QUANT16_SYMM");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_FLOAT16) == ANEURALNETWORKS_TENSOR_FLOAT16,
+ "TENSOR_FLOAT16 != ANEURALNETWORKS_TENSOR_FLOAT16");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_BOOL8) == ANEURALNETWORKS_TENSOR_BOOL8,
+ "TENSOR_BOOL8 != ANEURALNETWORKS_TENSOR_BOOL8");
+static_assert(static_cast<int32_t>(OperandType::FLOAT16) == ANEURALNETWORKS_FLOAT16,
+ "FLOAT16 != ANEURALNETWORKS_FLOAT16");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) ==
+ ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
+ "TENSOR_QUANT8_SYMM_PER_CHANNEL != ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM) ==
+ ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
+ "TENSOR_QUANT16_ASYMM != ANEURALNETWORKS_TENSOR_QUANT16_ASYMM");
+static_assert(static_cast<int32_t>(OperandType::TENSOR_QUANT8_SYMM) ==
+ ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
+ "TENSOR_QUANT8_SYMM != ANEURALNETWORKS_TENSOR_QUANT8_SYMM");
+
+static_assert(static_cast<int32_t>(OperationType::ABS) == ANEURALNETWORKS_ABS,
+ "OperationType::ABS != ANEURALNETWORKS_ABS");
+static_assert(static_cast<int32_t>(OperationType::ARGMAX) == ANEURALNETWORKS_ARGMAX,
+ "OperationType::ARGMAX != ANEURALNETWORKS_ARGMAX");
+static_assert(static_cast<int32_t>(OperationType::ARGMIN) == ANEURALNETWORKS_ARGMIN,
+ "OperationType::ARGMIN != ANEURALNETWORKS_ARGMIN");
+static_assert(static_cast<int32_t>(OperationType::AXIS_ALIGNED_BBOX_TRANSFORM) ==
+ ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM,
+ "OperationType::AXIS_ALIGNED_BBOX_TRANSFORM != "
+ "ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM");
+static_assert(static_cast<int32_t>(OperationType::BIDIRECTIONAL_SEQUENCE_LSTM) ==
+ ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM,
+ "OperationType::BIDIRECTIONAL_SEQUENCE_LSTM != "
+ "ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM");
+static_assert(
+ static_cast<int32_t>(OperationType::BIDIRECTIONAL_SEQUENCE_RNN) ==
+ ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN,
+ "OperationType::BIDIRECTIONAL_SEQUENCE_RNN != ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN");
+static_assert(static_cast<int32_t>(OperationType::BOX_WITH_NMS_LIMIT) ==
+ ANEURALNETWORKS_BOX_WITH_NMS_LIMIT,
+ "OperationType::BOX_WITH_NMS_LIMIT != ANEURALNETWORKS_BOX_WITH_NMS_LIMIT");
+static_assert(static_cast<int32_t>(OperationType::CAST) == ANEURALNETWORKS_CAST,
+ "OperationType::CAST != ANEURALNETWORKS_CAST");
+static_assert(static_cast<int32_t>(OperationType::CHANNEL_SHUFFLE) ==
+ ANEURALNETWORKS_CHANNEL_SHUFFLE,
+ "OperationType::CHANNEL_SHUFFLE != ANEURALNETWORKS_CHANNEL_SHUFFLE");
+static_assert(
+ static_cast<int32_t>(OperationType::DETECTION_POSTPROCESSING) ==
+ ANEURALNETWORKS_DETECTION_POSTPROCESSING,
+ "OperationType::DETECTION_POSTPROCESSING != ANEURALNETWORKS_DETECTION_POSTPROCESSING");
+static_assert(static_cast<int32_t>(OperationType::EQUAL) == ANEURALNETWORKS_EQUAL,
+ "OperationType::EQUAL != ANEURALNETWORKS_EQUAL");
+static_assert(static_cast<int32_t>(OperationType::EXP) == ANEURALNETWORKS_EXP,
+ "OperationType::EXP != ANEURALNETWORKS_EXP");
+static_assert(static_cast<int32_t>(OperationType::EXPAND_DIMS) == ANEURALNETWORKS_EXPAND_DIMS,
+ "OperationType::EXPAND_DIMS != ANEURALNETWORKS_EXPAND_DIMS");
+static_assert(static_cast<int32_t>(OperationType::GATHER) == ANEURALNETWORKS_GATHER,
+ "OperationType::GATHER != ANEURALNETWORKS_GATHER");
+static_assert(static_cast<int32_t>(OperationType::GENERATE_PROPOSALS) ==
+ ANEURALNETWORKS_GENERATE_PROPOSALS,
+ "OperationType::GENERATE_PROPOSALS != ANEURALNETWORKS_GENERATE_PROPOSALS");
+static_assert(static_cast<int32_t>(OperationType::GREATER) == ANEURALNETWORKS_GREATER,
+ "OperationType::GREATER != ANEURALNETWORKS_GREATER");
+static_assert(static_cast<int32_t>(OperationType::GREATER_EQUAL) == ANEURALNETWORKS_GREATER_EQUAL,
+ "OperationType::GREATER_EQUAL != ANEURALNETWORKS_GREATER_EQUAL");
+// Compile-time guarantees that the canonical OperationType enum values stay in
+// sync with the NDK ANEURALNETWORKS_* operation constants. A failure here means
+// the NDK header and the canonical types have diverged and casts between the
+// two representations are no longer safe.
+static_assert(static_cast<int32_t>(OperationType::GROUPED_CONV_2D) ==
+                      ANEURALNETWORKS_GROUPED_CONV_2D,
+              "OperationType::GROUPED_CONV_2D != ANEURALNETWORKS_GROUPED_CONV_2D");
+static_assert(static_cast<int32_t>(OperationType::HEATMAP_MAX_KEYPOINT) ==
+                      ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT,
+              "OperationType::HEATMAP_MAX_KEYPOINT != ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT");
+static_assert(static_cast<int32_t>(OperationType::INSTANCE_NORMALIZATION) ==
+                      ANEURALNETWORKS_INSTANCE_NORMALIZATION,
+              "OperationType::INSTANCE_NORMALIZATION != ANEURALNETWORKS_INSTANCE_NORMALIZATION");
+static_assert(static_cast<int32_t>(OperationType::LESS) == ANEURALNETWORKS_LESS,
+              "OperationType::LESS != ANEURALNETWORKS_LESS");
+static_assert(static_cast<int32_t>(OperationType::LESS_EQUAL) == ANEURALNETWORKS_LESS_EQUAL,
+              "OperationType::LESS_EQUAL != ANEURALNETWORKS_LESS_EQUAL");
+static_assert(static_cast<int32_t>(OperationType::LOG) == ANEURALNETWORKS_LOG,
+              "OperationType::LOG != ANEURALNETWORKS_LOG");
+static_assert(static_cast<int32_t>(OperationType::LOGICAL_AND) == ANEURALNETWORKS_LOGICAL_AND,
+              "OperationType::LOGICAL_AND != ANEURALNETWORKS_LOGICAL_AND");
+static_assert(static_cast<int32_t>(OperationType::LOGICAL_NOT) == ANEURALNETWORKS_LOGICAL_NOT,
+              "OperationType::LOGICAL_NOT != ANEURALNETWORKS_LOGICAL_NOT");
+static_assert(static_cast<int32_t>(OperationType::LOGICAL_OR) == ANEURALNETWORKS_LOGICAL_OR,
+              "OperationType::LOGICAL_OR != ANEURALNETWORKS_LOGICAL_OR");
+static_assert(static_cast<int32_t>(OperationType::LOG_SOFTMAX) == ANEURALNETWORKS_LOG_SOFTMAX,
+              "OperationType::LOG_SOFTMAX != ANEURALNETWORKS_LOG_SOFTMAX");
+static_assert(static_cast<int32_t>(OperationType::MAXIMUM) == ANEURALNETWORKS_MAXIMUM,
+              "OperationType::MAXIMUM != ANEURALNETWORKS_MAXIMUM");
+static_assert(static_cast<int32_t>(OperationType::MINIMUM) == ANEURALNETWORKS_MINIMUM,
+              "OperationType::MINIMUM != ANEURALNETWORKS_MINIMUM");
+static_assert(static_cast<int32_t>(OperationType::NEG) == ANEURALNETWORKS_NEG,
+              "OperationType::NEG != ANEURALNETWORKS_NEG");
+static_assert(static_cast<int32_t>(OperationType::NOT_EQUAL) == ANEURALNETWORKS_NOT_EQUAL,
+              "OperationType::NOT_EQUAL != ANEURALNETWORKS_NOT_EQUAL");
+static_assert(static_cast<int32_t>(OperationType::PAD_V2) == ANEURALNETWORKS_PAD_V2,
+              "OperationType::PAD_V2 != ANEURALNETWORKS_PAD_V2");
+static_assert(static_cast<int32_t>(OperationType::POW) == ANEURALNETWORKS_POW,
+              "OperationType::POW != ANEURALNETWORKS_POW");
+static_assert(static_cast<int32_t>(OperationType::PRELU) == ANEURALNETWORKS_PRELU,
+              "OperationType::PRELU != ANEURALNETWORKS_PRELU");
+static_assert(static_cast<int32_t>(OperationType::QUANTIZE) == ANEURALNETWORKS_QUANTIZE,
+              "OperationType::QUANTIZE != ANEURALNETWORKS_QUANTIZE");
+static_assert(static_cast<int32_t>(OperationType::QUANTIZED_16BIT_LSTM) ==
+                      ANEURALNETWORKS_QUANTIZED_16BIT_LSTM,
+              "OperationType::QUANTIZED_16BIT_LSTM != ANEURALNETWORKS_QUANTIZED_16BIT_LSTM");
+static_assert(static_cast<int32_t>(OperationType::RANDOM_MULTINOMIAL) ==
+                      ANEURALNETWORKS_RANDOM_MULTINOMIAL,
+              "OperationType::RANDOM_MULTINOMIAL != ANEURALNETWORKS_RANDOM_MULTINOMIAL");
+static_assert(static_cast<int32_t>(OperationType::REDUCE_ALL) == ANEURALNETWORKS_REDUCE_ALL,
+              "OperationType::REDUCE_ALL != ANEURALNETWORKS_REDUCE_ALL");
+static_assert(static_cast<int32_t>(OperationType::REDUCE_ANY) == ANEURALNETWORKS_REDUCE_ANY,
+              "OperationType::REDUCE_ANY != ANEURALNETWORKS_REDUCE_ANY");
+static_assert(static_cast<int32_t>(OperationType::REDUCE_MAX) == ANEURALNETWORKS_REDUCE_MAX,
+              "OperationType::REDUCE_MAX != ANEURALNETWORKS_REDUCE_MAX");
+static_assert(static_cast<int32_t>(OperationType::REDUCE_MIN) == ANEURALNETWORKS_REDUCE_MIN,
+              "OperationType::REDUCE_MIN != ANEURALNETWORKS_REDUCE_MIN");
+static_assert(static_cast<int32_t>(OperationType::REDUCE_PROD) == ANEURALNETWORKS_REDUCE_PROD,
+              "OperationType::REDUCE_PROD != ANEURALNETWORKS_REDUCE_PROD");
+static_assert(static_cast<int32_t>(OperationType::REDUCE_SUM) == ANEURALNETWORKS_REDUCE_SUM,
+              "OperationType::REDUCE_SUM != ANEURALNETWORKS_REDUCE_SUM");
+static_assert(static_cast<int32_t>(OperationType::ROI_ALIGN) == ANEURALNETWORKS_ROI_ALIGN,
+              "OperationType::ROI_ALIGN != ANEURALNETWORKS_ROI_ALIGN");
+static_assert(static_cast<int32_t>(OperationType::ROI_POOLING) == ANEURALNETWORKS_ROI_POOLING,
+              "OperationType::ROI_POOLING != ANEURALNETWORKS_ROI_POOLING");
+static_assert(static_cast<int32_t>(OperationType::RSQRT) == ANEURALNETWORKS_RSQRT,
+              "OperationType::RSQRT != ANEURALNETWORKS_RSQRT");
+static_assert(static_cast<int32_t>(OperationType::SELECT) == ANEURALNETWORKS_SELECT,
+              "OperationType::SELECT != ANEURALNETWORKS_SELECT");
+static_assert(static_cast<int32_t>(OperationType::SIN) == ANEURALNETWORKS_SIN,
+              "OperationType::SIN != ANEURALNETWORKS_SIN");
+static_assert(static_cast<int32_t>(OperationType::SLICE) == ANEURALNETWORKS_SLICE,
+              "OperationType::SLICE != ANEURALNETWORKS_SLICE");
+static_assert(static_cast<int32_t>(OperationType::SPLIT) == ANEURALNETWORKS_SPLIT,
+              "OperationType::SPLIT != ANEURALNETWORKS_SPLIT");
+static_assert(static_cast<int32_t>(OperationType::SQRT) == ANEURALNETWORKS_SQRT,
+              "OperationType::SQRT != ANEURALNETWORKS_SQRT");
+static_assert(static_cast<int32_t>(OperationType::TILE) == ANEURALNETWORKS_TILE,
+              "OperationType::TILE != ANEURALNETWORKS_TILE");
+static_assert(static_cast<int32_t>(OperationType::TOPK_V2) == ANEURALNETWORKS_TOPK_V2,
+              "OperationType::TOPK_V2 != ANEURALNETWORKS_TOPK_V2");
+static_assert(static_cast<int32_t>(OperationType::TRANSPOSE_CONV_2D) ==
+                      ANEURALNETWORKS_TRANSPOSE_CONV_2D,
+              "OperationType::TRANSPOSE_CONV_2D != ANEURALNETWORKS_TRANSPOSE_CONV_2D");
+static_assert(static_cast<int32_t>(OperationType::UNIDIRECTIONAL_SEQUENCE_LSTM) ==
+                      ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM,
+              "OperationType::UNIDIRECTIONAL_SEQUENCE_LSTM != "
+              "ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM");
+static_assert(static_cast<int32_t>(OperationType::UNIDIRECTIONAL_SEQUENCE_RNN) ==
+                      ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN,
+              "OperationType::UNIDIRECTIONAL_SEQUENCE_RNN != "
+              "ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN");
+static_assert(static_cast<int32_t>(OperationType::RESIZE_NEAREST_NEIGHBOR) ==
+                      ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR,
+              "OperationType::RESIZE_NEAREST_NEIGHBOR != ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR");
+static_assert(static_cast<int32_t>(OperationType::QUANTIZED_LSTM) == ANEURALNETWORKS_QUANTIZED_LSTM,
+              "OperationType::QUANTIZED_LSTM != ANEURALNETWORKS_QUANTIZED_LSTM");
+static_assert(static_cast<int32_t>(OperationType::IF) == ANEURALNETWORKS_IF,
+              "OperationType::IF != ANEURALNETWORKS_IF");
+static_assert(static_cast<int32_t>(OperationType::WHILE) == ANEURALNETWORKS_WHILE,
+              "OperationType::WHILE != ANEURALNETWORKS_WHILE");
+static_assert(static_cast<int32_t>(OperationType::ELU) == ANEURALNETWORKS_ELU,
+              "OperationType::ELU != ANEURALNETWORKS_ELU");
+static_assert(static_cast<int32_t>(OperationType::HARD_SWISH) == ANEURALNETWORKS_HARD_SWISH,
+              "OperationType::HARD_SWISH != ANEURALNETWORKS_HARD_SWISH");
+static_assert(static_cast<int32_t>(OperationType::FILL) == ANEURALNETWORKS_FILL,
+              "OperationType::FILL != ANEURALNETWORKS_FILL");
+static_assert(static_cast<int32_t>(OperationType::RANK) == ANEURALNETWORKS_RANK,
+              "OperationType::RANK != ANEURALNETWORKS_RANK");
+static_assert(static_cast<int32_t>(OperationType::BATCH_MATMUL) == ANEURALNETWORKS_BATCH_MATMUL,
+              "OperationType::BATCH_MATMUL != ANEURALNETWORKS_BATCH_MATMUL");
+static_assert(static_cast<int32_t>(OperationType::PACK) == ANEURALNETWORKS_PACK,
+              "OperationType::PACK != ANEURALNETWORKS_PACK");
+static_assert(static_cast<int32_t>(OperationType::MIRROR_PAD) == ANEURALNETWORKS_MIRROR_PAD,
+              "OperationType::MIRROR_PAD != ANEURALNETWORKS_MIRROR_PAD");
+static_assert(static_cast<int32_t>(OperationType::REVERSE) == ANEURALNETWORKS_REVERSE,
+              "OperationType::REVERSE != ANEURALNETWORKS_REVERSE");
+
+// Same guarantee for the canonical DeviceType enum vs the NDK
+// ANEURALNETWORKS_DEVICE_* constants.
+static_assert(static_cast<int32_t>(DeviceType::OTHER) == ANEURALNETWORKS_DEVICE_OTHER,
+              "DeviceType::OTHER != ANEURALNETWORKS_DEVICE_OTHER");
+static_assert(static_cast<int32_t>(DeviceType::CPU) == ANEURALNETWORKS_DEVICE_CPU,
+              "DeviceType::CPU != ANEURALNETWORKS_DEVICE_CPU");
+static_assert(static_cast<int32_t>(DeviceType::GPU) == ANEURALNETWORKS_DEVICE_GPU,
+              "DeviceType::GPU != ANEURALNETWORKS_DEVICE_GPU");
+static_assert(static_cast<int32_t>(DeviceType::ACCELERATOR) == ANEURALNETWORKS_DEVICE_ACCELERATOR,
+              "DeviceType::ACCELERATOR != ANEURALNETWORKS_DEVICE_ACCELERATOR");
+
+// Make sure that the constants are compatible with the values defined in
+// hardware/interfaces/neuralnetworks/1.3/types.hal.
+// convertToCanonicalPriority is evaluated at compile time here, so a mapping
+// change is caught at build time rather than at runtime.
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_LOW) ==
+                      Priority::LOW,
+              "ANEURALNETWORKS_PRIORITY_LOW does not map to Priority::LOW");
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) ==
+                      Priority::MEDIUM,
+              "ANEURALNETWORKS_PRIORITY_MEDIUM does not map to Priority::MEDIUM");
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_HIGH) ==
+                      Priority::HIGH,
+              "ANEURALNETWORKS_PRIORITY_HIGH does not map to Priority::HIGH");
+
+// Asserts for ANeuralNetworksOperandType memory layout
+// This layout is ABI: applications compiled against the public NDK header rely
+// on these exact offsets, so any change here is a binary-compatibility break.
+static_assert(offsetof(ANeuralNetworksOperandType, type) == 0,
+              "ANeuralNetworksOperandType.type offset != 0");
+static_assert(offsetof(ANeuralNetworksOperandType, dimensionCount) == 4,
+              "ANeuralNetworksOperandType.dimensionCount offset != 4");
+static_assert(offsetof(ANeuralNetworksOperandType, dimensions) == 8,
+              "ANeuralNetworksOperandType.dimensions offset != 8");
+static_assert(offsetof(ANeuralNetworksOperandType, scale) == 8 + sizeof(void*),
+              "ANeuralNetworksOperandType.scale offset != 8 + sizeof(void*)");
+static_assert(offsetof(ANeuralNetworksOperandType, zeroPoint) == 12 + sizeof(void*),
+              "ANeuralNetworksOperandType.zeroPoint offset != 12 + sizeof(void*)");
+static_assert(sizeof(ANeuralNetworksOperandType) == 16 + sizeof(void*),
+              "ANeuralNetworksOperandType size changed");
+static_assert(alignof(ANeuralNetworksOperandType) == alignof(void*),
+              "ANeuralNetworksOperandType alignment changed");
+
+// Asserts for ANeuralNetworksSymmPerChannelQuantParams memory layout.
+// This layout is ABI for NDK clients. Note: the diagnostic messages below were
+// previously copy-pasted with the wrong offsets / struct name; they now match
+// the conditions they report on.
+static_assert(offsetof(ANeuralNetworksSymmPerChannelQuantParams, channelDim) == 0,
+              "ANeuralNetworksSymmPerChannelQuantParams.channelDim offset != 0");
+static_assert(offsetof(ANeuralNetworksSymmPerChannelQuantParams, scaleCount) == 4,
+              "ANeuralNetworksSymmPerChannelQuantParams.scaleCount offset != 4");
+static_assert(offsetof(ANeuralNetworksSymmPerChannelQuantParams, scales) == 8,
+              "ANeuralNetworksSymmPerChannelQuantParams.scales offset != 8");
+static_assert(sizeof(ANeuralNetworksSymmPerChannelQuantParams) == 8 + sizeof(void*),
+              "ANeuralNetworksSymmPerChannelQuantParams size != 8 + sizeof(void*)");
+static_assert(alignof(ANeuralNetworksSymmPerChannelQuantParams) == alignof(void*),
+              "ANeuralNetworksSymmPerChannelQuantParams alignment changed");
+
+// Asserts for compilation caching
+static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == 32,
+              "ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN has changed");
+static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == kByteSizeOfCacheToken,
+              "ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN != kByteSizeOfCacheToken");
+
+// Asserts for compilation priority. These numeric values are public API and
+// must never change once released.
+static_assert(ANEURALNETWORKS_PRIORITY_LOW == 90, "ANEURALNETWORKS_PRIORITY_LOW has changed");
+static_assert(ANEURALNETWORKS_PRIORITY_MEDIUM == 100,
+              "ANEURALNETWORKS_PRIORITY_MEDIUM has changed");
+static_assert(ANEURALNETWORKS_PRIORITY_HIGH == 110, "ANEURALNETWORKS_PRIORITY_HIGH has changed");
+static_assert(ANEURALNETWORKS_PRIORITY_DEFAULT == ANEURALNETWORKS_PRIORITY_MEDIUM,
+              "ANEURALNETWORKS_PRIORITY_DEFAULT has changed");
+
+// Asserts for feature levels. Levels 1-5 track Android API levels (27-31);
+// later levels use the 100000x namespace decoupled from the SDK version.
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_1 == 27, "ANEURALNETWORKS_FEATURE_LEVEL_1 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_2 == 28, "ANEURALNETWORKS_FEATURE_LEVEL_2 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_3 == 29, "ANEURALNETWORKS_FEATURE_LEVEL_3 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_4 == 30, "ANEURALNETWORKS_FEATURE_LEVEL_4 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_5 == 31, "ANEURALNETWORKS_FEATURE_LEVEL_5 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_6 == 1000006,
+              "ANEURALNETWORKS_FEATURE_LEVEL_6 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_7 == 1000007,
+              "ANEURALNETWORKS_FEATURE_LEVEL_7 has changed");
+static_assert(ANEURALNETWORKS_FEATURE_LEVEL_8 == 1000008,
+              "ANEURALNETWORKS_FEATURE_LEVEL_8 has changed");
+
+// Reports the number of drivers currently registered with the DeviceManager.
+// Returns ANEURALNETWORKS_UNEXPECTED_NULL if the out-pointer is missing.
+int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) {
+    if (numDevices == nullptr) {
+        LOG(ERROR) << "ANeuralNetworks_getDeviceCount passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto& drivers = DeviceManager::get()->getDrivers();
+    *numDevices = drivers.size();
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Looks up the driver at devIndex and returns it as an opaque device handle.
+// The returned pointer is owned by the runtime, not the caller.
+int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device) {
+    if (device == nullptr) {
+        LOG(ERROR) << "ANeuralNetworks_getDevice passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto& drivers = DeviceManager::get()->getDrivers();
+    if (devIndex >= drivers.size()) {
+        LOG(ERROR) << "ANeuralNetworks_getDevice passed an invalid device index";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    *device = reinterpret_cast<ANeuralNetworksDevice*>(drivers[devIndex].get());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Exposes the driver's name. The returned C string aliases storage owned by
+// the Device object; it stays valid for the lifetime of the runtime.
+int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name) {
+    if (device == nullptr || name == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_getName passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto* dev = reinterpret_cast<const Device*>(device);
+    *name = dev->getName().c_str();
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Exposes the driver's version string; same lifetime contract as getName.
+int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version) {
+    if (device == nullptr || version == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_getVersion passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto* dev = reinterpret_cast<const Device*>(device);
+    *version = dev->getVersionString().c_str();
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Reports the device category (ANEURALNETWORKS_DEVICE_*). A negative value
+// from the driver is treated as a failure and not exposed to the caller.
+int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type) {
+    if (device == nullptr || type == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_getType passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const Device* d = reinterpret_cast<const Device*>(device);
+    const int32_t dType = d->getType();
+    if (dType < 0) {
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+    // Reuse the value already fetched and validated; the original code called
+    // getType() a second time, which was redundant and would bypass the
+    // negative-value check if the result could ever differ between calls.
+    *type = dType;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+#ifdef NN_DEBUGGABLE
+// Test-only override for the reported runtime feature level; 0 means
+// "no override" (see getRuntimeFeatureLevelImpl below). Only compiled into
+// debuggable builds.
+static int64_t sRuntimeFeatureLevel = 0;
+void forTest_setRuntimeFeatureLevel(int64_t level) {
+    sRuntimeFeatureLevel = level;
+}
+#endif
+
+// ANeuralNetworks_getRuntimeFeatureLevel is new in API level 31 while
+// libneuralnetworks targets "min_sdk_version: 30", so external callers must
+// guard calls to it (e.g. with __builtin_available). Within this compilation
+// unit, however, the symbol ships in this very library and therefore exists
+// even on systems where __builtin_available(31) evaluates to false — guarding
+// the call here would be wrong. To keep the compiler happy we introduce
+// getRuntimeFeatureLevelImpl() and call it within the library.
+// Returns the feature level the runtime reports for itself, honoring the
+// test-only override in debuggable builds.
+static inline int64_t getRuntimeFeatureLevelImpl() {
+#ifdef NN_DEBUGGABLE
+    // A nonzero value set via forTest_setRuntimeFeatureLevel() wins.
+    if (sRuntimeFeatureLevel) {
+        return sRuntimeFeatureLevel;
+    }
+#endif
+    return DeviceManager::get()->getRuntimeFeatureLevel();
+}
+
+// Reports the NNAPI feature level supported for this device, clamped to the
+// runtime's own feature level (a device can never offer more than the runtime
+// exposes).
+int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
+                                          int64_t* featureLevel) {
+    if (device == nullptr || featureLevel == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_getFeatureLevel passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    // NOTE(review): the const_cast suggests Device::getFeatureLevel() is
+    // non-const — confirm; a const overload would let this cast be dropped.
+    Device* d = reinterpret_cast<Device*>(const_cast<ANeuralNetworksDevice*>(device));
+    // Translate the device's canonical version into an NDK feature level;
+    // a negative result means the version could not be mapped.
+    int64_t dFeatureLevel = DeviceManager::versionToFeatureLevel(d->getFeatureLevel().level);
+    if (dFeatureLevel < 0) {
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    *featureLevel = std::min(getRuntimeFeatureLevelImpl(), dFeatureLevel);
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Blocks until the device is free of pending work, forwarding the driver's
+// own status code to the caller.
+int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) {
+    if (device == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_wait passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    return reinterpret_cast<const Device*>(device)->wait();
+}
+
+// Fills supportedOps[i] with whether ANY of the given devices can run the
+// model's i-th operation (indices are the caller's original operation order).
+// Rejects null/duplicate devices, empty device lists, and unfinished models.
+int ANeuralNetworksModel_getSupportedOperationsForDevices(
+        const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
+        uint32_t numDevices, bool* supportedOps) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksModel_getSupportedOperationsForDevices");
+    if (model == nullptr || devices == nullptr || supportedOps == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_getSupportedOperationsForDevices passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    if (numDevices == 0) {
+        LOG(ERROR) << "ANeuralNetworksModel_getSupportedOperationsForDevices passed an empty "
+                      "device list";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    const FlatbufferModelBuilder* m = reinterpret_cast<const FlatbufferModelBuilder*>(model);
+    if (!m->isFinished() || !m->isValid()) {
+        LOG(ERROR) << "ANeuralNetworksModel_getSupportedOperationsForDevices passed an unfinished "
+                      "or invalid Model";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+
+    const Model canonicalModel = m->makeModel();
+    const std::vector<uint32_t>& opMap = m->getSortedOperationMapping();
+    // PERF: the MetaModel depends only on the model, not on any device, so
+    // build it once here instead of re-constructing it (which copies the
+    // model) on every iteration of the device loop, as the code previously did.
+    const MetaModel metaModel(canonicalModel, DeviceManager::get()->strictSlicing());
+    // init the output array to false for all the operations.
+    std::fill(supportedOps, supportedOps + opMap.size(), false);
+    for (uint32_t i = 0; i < numDevices; i++) {
+        if (devices[i] == nullptr) {
+            LOG(ERROR) << "ANeuralNetworksModel_getSupportedOperationsForDevices passed a nullptr "
+                          "as a device";
+            return ANEURALNETWORKS_UNEXPECTED_NULL;
+        }
+        // Duplicate devices would double-count support; reject them.
+        for (uint32_t j = i + 1; j < numDevices; j++) {
+            if (devices[i] == devices[j]) {
+                LOG(ERROR) << "ANeuralNetworksModel_getSupportedOperationsForDevices passed "
+                              "duplicate devices";
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+        }
+
+        Device* d = reinterpret_cast<Device*>(const_cast<ANeuralNetworksDevice*>(devices[i]));
+        const std::vector<bool> supportsByDevice = d->getSupportedOperations(metaModel);
+        // getSupportedOperations answers in sorted (topological) operation
+        // order; opMap translates each answer back to the caller's index.
+        for (uint32_t j = 0; j < supportsByDevice.size(); j++) {
+            supportedOps[opMap[j]] |= supportsByDevice[j];
+        }
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Stub: device-targeted compilation is not yet implemented in the NNAPI v2
+// (TFLite-backed) runtime; always fails with ANEURALNETWORKS_OP_FAILED.
+int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* /* model */,
+                                                const ANeuralNetworksDevice* const* /* devices */,
+                                                uint32_t /* numDevices */,
+                                                ANeuralNetworksCompilation** /* compilation */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_createForDevices");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_createForDevices unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Per-execution state that bridges the NNAPI execution API onto a TFLite
+// interpreter. All vectors are sized from the interpreter's own input/output
+// lists so they can be indexed by input/output position.
+struct ExecutionContext {
+    // inputs are always copied before execution while outputs may be set by custom allocation
+    std::vector<void*> outputs;                // user output buffers (nullptr if custom-allocated)
+    std::vector<size_t> outputSizes;           // byte sizes matching `outputs`
+    std::vector<bool> isOutputSpecifiedAtIndex;  // whether setOutput* was called per output
+    std::vector<const void*> inputs;           // user input buffers, copied in at compute time
+    std::vector<size_t> inputSizes;            // byte sizes matching `inputs`
+
+    std::unique_ptr<tflite::Interpreter> interpreter;
+
+    // Note: the member initializers read `interpreter` (the parameter) before
+    // it is moved into the member, which is why the move comes last.
+    ExecutionContext(std::unique_ptr<tflite::Interpreter> interpreter)
+        : outputs(interpreter->outputs().size()),
+          outputSizes(interpreter->outputs().size()),
+          isOutputSpecifiedAtIndex(interpreter->outputs().size(), false),
+          inputs(interpreter->inputs().size()),
+          inputSizes(interpreter->inputs().size()),
+          interpreter(std::move(interpreter)) {}
+};
+
+// Runs one synchronous inference: copies user input buffers into the TFLite
+// interpreter, invokes it, then copies interpreter outputs back into any
+// user-provided output buffers.
+int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_compute");
+    if (!execution) {
+        LOG(ERROR) << "ANeuralNetworksExecution_compute passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    auto context = reinterpret_cast<ExecutionContext*>(execution);
+    // Every output must have been specified (setOutput*) before computing.
+    if (std::any_of(context->isOutputSpecifiedAtIndex.begin(),
+                    context->isOutputSpecifiedAtIndex.end(), [](bool isSet) { return !isSet; })) {
+        LOG(ERROR) << "ANeuralNetworksExecution_compute not all output buffers are specified";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+
+    auto result = context->interpreter->AllocateTensors();
+    if (result != kTfLiteOk) {
+        LOG(ERROR) << "ANeuralNetworksExecution_compute allocate tensors failed";
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+
+    // Copy user inputs into the interpreter's tensors.
+    // NOTE(review): `length` is trusted to fit the tensor — there is no check
+    // against input_tensor(index)->bytes here; confirm sizes are validated
+    // when the input is set, otherwise this memcpy can overrun.
+    for (uint32_t index = 0; index < context->interpreter->inputs().size(); index++) {
+        const void* buffer = context->inputs[index];
+        if (buffer == nullptr) {
+            LOG(ERROR) << "ANeuralNetworksExecution_compute not all input buffers are specified";
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+        size_t length = context->inputSizes[index];
+        std::memcpy(context->interpreter->input_tensor(index)->data.raw, buffer, length);
+    }
+
+    if (context->interpreter->Invoke() != kTfLiteOk) {
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+
+    // Copy results out. A nullptr entry means no user buffer was registered
+    // for that output (presumably it used a custom allocation — see the
+    // ExecutionContext comment), so the copy is skipped.
+    for (uint32_t i = 0; i < context->interpreter->outputs().size(); i++) {
+        if (context->outputs[i] == nullptr) {
+            continue;
+        }
+
+        const size_t bufferSize = context->outputSizes[i];
+        std::memcpy(context->outputs[i], context->interpreter->output_tensor(i)->data.raw,
+                    bufferSize);
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Stub: timing measurement is not yet implemented in the NNAPI v2 runtime.
+int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* /* execution */,
+                                              bool /* measure */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_setMeasureTiming");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_setMeasureTiming unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: duration reporting is not yet implemented in the NNAPI v2 runtime.
+int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* /* execution */,
+                                         int32_t /* durationCode */, uint64_t* /* duration */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_getDuration");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_getDuration unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Creates a burst object from a finished compilation. On failure the burst
+// out-pointer is set from a null BurstBuilder, matching createBurst's result.
+int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation,
+                                ANeuralNetworksBurst** burst) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksBurst_create");
+    if (compilation == nullptr || burst == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksBurst_create passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    auto* c = reinterpret_cast<CompilationBuilder*>(compilation);
+    BurstBuilder* b = nullptr;
+    const int status = c->createBurst(&b);
+    *burst = reinterpret_cast<ANeuralNetworksBurst*>(b);
+    return status;
+}
+
+// Destroys a burst object. Deleting a nullptr is a no-op, so no validation.
+void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) {
+    NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksBurst_free");
+    delete reinterpret_cast<BurstBuilder*>(burst);
+}
+
+// Stub: burst execution is not yet implemented in the NNAPI v2 runtime.
+int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* /* execution */,
+                                          ANeuralNetworksBurst* /* burst */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_burstCompute");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_burstCompute unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Allocates a fresh memory descriptor. The out-pointer is cleared first so
+// callers never observe a stale value on the error path.
+int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksMemoryDesc_create");
+    if (desc == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemoryDesc_create passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    *desc = nullptr;
+    auto builder = std::make_unique<MemoryBuilder>();
+    *desc = reinterpret_cast<ANeuralNetworksMemoryDesc*>(builder.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Destroys a memory descriptor. Deleting a nullptr is a no-op, so no validation.
+void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) {
+    NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksMemoryDesc_free");
+    delete reinterpret_cast<MemoryBuilder*>(desc);
+}
+
+// Registers the descriptor as an input of the given compilation at `index`,
+// with a usage-frequency hint; validation is delegated to MemoryBuilder.
+int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc,
+                                           const ANeuralNetworksCompilation* compilation,
+                                           uint32_t index, float frequency) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksMemoryDesc_addInputRole");
+    if (desc == nullptr || compilation == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemoryDesc_addInputRole passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = reinterpret_cast<MemoryBuilder*>(desc);
+    const auto* compiler = reinterpret_cast<const CompilationBuilder*>(compilation);
+    return builder->addRole(*compiler, IOType::INPUT, index, frequency);
+}
+
+// Registers the descriptor as an output of the given compilation at `index`,
+// with a usage-frequency hint; validation is delegated to MemoryBuilder.
+int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc,
+                                            const ANeuralNetworksCompilation* compilation,
+                                            uint32_t index, float frequency) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksMemoryDesc_addOutputRole");
+    if (desc == nullptr || compilation == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemoryDesc_addOutputRole passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = reinterpret_cast<MemoryBuilder*>(desc);
+    const auto* compiler = reinterpret_cast<const CompilationBuilder*>(compilation);
+    return builder->addRole(*compiler, IOType::OUTPUT, index, frequency);
+}
+
+// Sets the tensor dimensions for the descriptor. A null `dimensions` is only
+// acceptable for rank 0 (scalar / unspecified).
+int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank,
+                                            const uint32_t* dimensions) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksMemoryDesc_setDimensions");
+    if (desc == nullptr || (dimensions == nullptr && rank > 0)) {
+        LOG(ERROR) << "ANeuralNetworksMemoryDesc_setDimensions passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = reinterpret_cast<MemoryBuilder*>(desc);
+    return builder->setDimensions(std::vector<uint32_t>(dimensions, dimensions + rank));
+}
+
+// Finalizes the descriptor; after this it can be used to allocate memory.
+int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksMemoryDesc_finish");
+    if (desc == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemoryDesc_finish passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    return reinterpret_cast<MemoryBuilder*>(desc)->finish();
+}
+
+// Allocates device memory described by a finished descriptor. The out-pointer
+// is cleared up-front so it is never left dangling on failure.
+int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc,
+                                         ANeuralNetworksMemory** memory) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksMemory_createFromDesc");
+    if (memory != nullptr) {
+        *memory = nullptr;
+    }
+    if (desc == nullptr || memory == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemory_createFromDesc passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto* builder = reinterpret_cast<const MemoryBuilder*>(desc);
+    auto [status, mem] = builder->allocate();
+    if (status != ANEURALNETWORKS_NO_ERROR) {
+        return status;
+    }
+    *memory = reinterpret_cast<ANeuralNetworksMemory*>(mem.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Copies the contents of one runtime memory object into another; compatibility
+// checking is handled inside RuntimeMemory::copy.
+int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksMemory_copy");
+    if (src == nullptr || dst == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemory_copy passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto* source = reinterpret_cast<const RuntimeMemory*>(src);
+    const auto* destination = reinterpret_cast<const RuntimeMemory*>(dst);
+    return RuntimeMemory::copy(*source, *destination);
+}
+
+// Wraps a file descriptor region as a runtime memory object. The out-pointer
+// is cleared up-front so it is never left dangling on failure.
+int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd, size_t offset,
+                                       ANeuralNetworksMemory** memory) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksMemory_createFromFd");
+    if (memory != nullptr) {
+        *memory = nullptr;
+    }
+    if (memory == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemory_createFromFd passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto [status, mem] = MemoryFd::create(size, prot, fd, offset);
+    if (status != ANEURALNETWORKS_NO_ERROR) {
+        return status;
+    }
+    *memory = reinterpret_cast<ANeuralNetworksMemory*>(mem.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Wraps an AHardwareBuffer as a runtime memory object. The out-pointer is
+// cleared up-front so it is never left dangling on failure.
+int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb,
+                                                    ANeuralNetworksMemory** memory) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksMemory_createFromAHardwareBuffer");
+    if (memory != nullptr) {
+        *memory = nullptr;
+    }
+    if (ahwb == nullptr || memory == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksMemory_createFromAHardwareBuffer passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto [status, mem] = MemoryAHWB::create(*ahwb);
+    if (status != ANEURALNETWORKS_NO_ERROR) {
+        return status;
+    }
+    *memory = reinterpret_cast<ANeuralNetworksMemory*>(mem.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Destroys a runtime memory object. Deleting a nullptr is a no-op.
+void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
+    NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksMemory_free");
+    delete reinterpret_cast<RuntimeMemory*>(memory);
+}
+
+// Creates an empty model builder (flatbuffer/TFLite-backed in NNAPI v2).
+// Uses nothrow-new so allocation failure maps to OUT_OF_MEMORY, not abort.
+int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_create");
+    initVLogMask();
+    if (model == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_create passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = new (std::nothrow) FlatbufferModelBuilder();
+    *model = reinterpret_cast<ANeuralNetworksModel*>(builder);
+    return builder == nullptr ? ANEURALNETWORKS_OUT_OF_MEMORY : ANEURALNETWORKS_NO_ERROR;
+}
+
+// Destroys a model builder. Deleting a nullptr is a no-op.
+void ANeuralNetworksModel_free(ANeuralNetworksModel* model) {
+    NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksModel_free");
+    delete reinterpret_cast<FlatbufferModelBuilder*>(model);
+}
+
+// Finalizes the model; after this no further mutation is allowed.
+int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_finish");
+    if (model == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_finish passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    return reinterpret_cast<FlatbufferModelBuilder*>(model)->finish();
+}
+
+// Appends a new operand of the given type to the model.
+int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
+                                    const ANeuralNetworksOperandType* type) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_addOperand");
+    if (model == nullptr || type == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperand passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    return reinterpret_cast<FlatbufferModelBuilder*>(model)->addOperand(*type);
+}
+
+// Assigns a constant value to an operand. A null buffer is allowed only for
+// zero-length values.
+int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
+                                         const void* buffer, size_t length) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_setOperandValue");
+    if (model == nullptr || (buffer == nullptr && length != 0)) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandValue passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    return reinterpret_cast<FlatbufferModelBuilder*>(model)->setOperandValue(index, buffer, length);
+}
+
+// Assigns a constant value to an operand from a region of a runtime memory
+// object.
+int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index,
+                                                   const ANeuralNetworksMemory* memory,
+                                                   size_t offset, size_t length) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_setOperandValueFromMemory");
+    if (!model || !memory) {
+        // Fixed copy-paste bug: the log previously named setOperandValue,
+        // making failures of this entry point hard to attribute.
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const RuntimeMemory* mem = reinterpret_cast<const RuntimeMemory*>(memory);
+    FlatbufferModelBuilder* m = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return m->setOperandValueFromMemory(index, mem, offset, length);
+}
+
+// Assigns a reference-model value to an operand (used by control flow ops).
+int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index,
+                                                  const ANeuralNetworksModel* value) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_setOperandValueFromModel");
+    if (model == nullptr || value == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromModel passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const auto* referenced = reinterpret_cast<const FlatbufferModelBuilder*>(value);
+    auto* builder = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return builder->setOperandValueFromModel(index, referenced);
+}
+
+// Appends an operation of the given type, wiring it to existing operand
+// indices for its inputs and outputs.
+int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
+                                      ANeuralNetworksOperationType type, uint32_t inputCount,
+                                      const uint32_t* inputs, uint32_t outputCount,
+                                      const uint32_t* outputs) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_addOperation");
+    if (model == nullptr || inputs == nullptr || outputs == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_addOperation passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return builder->addOperation(type, inputCount, inputs, outputCount, outputs);
+}
+
+// Attaches symmetric per-channel quantization parameters to an operand.
+int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
+        ANeuralNetworksModel* model, int32_t index,
+        const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION,
+               "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams");
+    if (model == nullptr || channelQuant == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return builder->setOperandSymmPerChannelQuantParams(index, *channelQuant);
+}
+
+// Declares which operand indices are the model's external inputs and outputs.
+int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
+                                                  const uint32_t* inputs, uint32_t outputCount,
+                                                  const uint32_t* outputs) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_identifyInputsAndOutputs");
+    if (model == nullptr || inputs == nullptr || outputs == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto* builder = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return builder->identifyInputsAndOutputs(inputCount, inputs, outputCount, outputs);
+}
+
+// Allows (or disallows) relaxed float32->float16 computation for this model;
+// forwards to FlatbufferModelBuilder::relaxComputationFloat32toFloat16.
+int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_relaxComputationFloat32toFloat16");
+    if (!model) {
+        // Consistency fix: drop the stray parentheses around the literal so the
+        // log statement matches every other entry point in this file.
+        LOG(ERROR) << "ANeuralNetworksModel_relaxComputationFloat32toFloat16 passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    FlatbufferModelBuilder* m = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return m->relaxComputationFloat32toFloat16(allow);
+}
+
+// Owns the TFLite model produced from an NNAPI model, plus a flag that guards
+// against ANeuralNetworksCompilation_finish being called twice.
+struct CompilationContext {
+    std::unique_ptr<tflite::FlatBufferModel> flatBufferModel;
+    bool isFinished = false;  // default member init instead of ctor list
+
+    // explicit: a unique_ptr should not implicitly convert to a context.
+    explicit CompilationContext(std::unique_ptr<tflite::FlatBufferModel> flatBufferModel)
+        : flatBufferModel(std::move(flatBufferModel)) {}
+};
+
+// Converts the model into a TFLite flatbuffer, wraps it in a CompilationContext,
+// and hands ownership of that context to the caller as an opaque handle
+// (released later by ANeuralNetworksCompilation_free).
+int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model,
+                                      ANeuralNetworksCompilation** compilation) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_create");
+    if (!model || !compilation) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_create passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    FlatbufferModelBuilder* m = reinterpret_cast<FlatbufferModelBuilder*>(model);
+
+    // Serialize the NNAPI model to a TFLite model; any conversion error maps to
+    // ANEURALNETWORKS_OP_FAILED.
+    auto tfliteModel = m->createTfliteModel();
+    if (!tfliteModel.ok()) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_create error: " << tfliteModel.error();
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+
+    std::unique_ptr<tflite::FlatBufferModel> flatBufferModel =
+            tflite::FlatBufferModel::BuildFromModel(tfliteModel.value());
+    if (!flatBufferModel) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_create error: tflite::BuildFromModel error";
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+
+    // Ownership transfers to the caller via the opaque handle.
+    std::unique_ptr<CompilationContext> context =
+            std::make_unique<CompilationContext>(std::move(flatBufferModel));
+    *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(context.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Destroys the CompilationContext behind the opaque handle.
+void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) {
+    NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksCompilation_free");
+    // Deleting a null pointer is a no-op, so a null handle needs no check.
+    delete reinterpret_cast<CompilationContext*>(compilation);
+}
+
+// Stub: compilation preferences are unimplemented in NNAPI v2; logs and
+// returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* /* compilation */,
+                                             int32_t /* preference */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_setPreference");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_setPreference unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: compilation caching is unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* /* compilation */,
+                                          const char* /* cacheDir */, const uint8_t* /* token */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_setCaching");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_setCaching unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Marks the compilation as finished. A second call is rejected with
+// ANEURALNETWORKS_BAD_STATE. No actual compilation work happens here; the
+// TFLite model was already built in ANeuralNetworksCompilation_create.
+int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_finish");
+    if (!compilation) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_finish passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    auto context = reinterpret_cast<CompilationContext*>(compilation);
+    if (context->isFinished) {
+        LOG(ERROR) << "ANeuralNetworksCompilation_finish has already been called";
+        return ANEURALNETWORKS_BAD_STATE;
+    }
+    context->isFinished = true;
+
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Stub: priorities are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* /* compilation */,
+                                           int /* priority */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_setPriority");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_setPriority unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: compilation timeouts are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* /* compilation */,
+                                          uint64_t /* duration */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_setTimeout");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_setTimeout unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Builds a TFLite interpreter for the compiled model (builtin ops only) and
+// wraps it in an ExecutionContext handed to the caller as an opaque handle.
+int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation,
+                                    ANeuralNetworksExecution** execution) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_create");
+    if (!compilation || !execution) {
+        LOG(ERROR) << "ANeuralNetworksExecution_create passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto c = reinterpret_cast<CompilationContext*>(compilation);
+
+    // NOTE(review): only builtin TFLite ops are resolvable here; custom or
+    // extension ops would fail interpreter construction — confirm intended.
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+    std::unique_ptr<tflite::Interpreter> interpreter;
+    auto status = tflite::InterpreterBuilder(*c->flatBufferModel, resolver)(&interpreter);
+    if (status != kTfLiteOk) {
+        LOG(ERROR) << "ANeuralNetworksExecution_create error: interpreter build status " << status
+                   << " != " << kTfLiteOk;
+        return ANEURALNETWORKS_OP_FAILED;
+    }
+
+    // Ownership of the context transfers to the caller via the opaque handle.
+    std::unique_ptr<ExecutionContext> context =
+            std::make_unique<ExecutionContext>(std::move(interpreter));
+    *execution = reinterpret_cast<ANeuralNetworksExecution*>(context.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Destroys the ExecutionContext behind the opaque handle.
+void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_free");
+    // Deleting a null pointer is a no-op, so a null handle needs no check.
+    delete reinterpret_cast<ExecutionContext*>(execution);
+}
+
+// Stub: output rank queries are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* /* execution */,
+                                                  int32_t /* index */, uint32_t* /* rank */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_getOutputOperandRank");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR)
+            << "ANeuralNetworksExecution_getOutputOperandRank unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: output dimension queries are unimplemented in NNAPI v2; logs and
+// returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* /* execution */,
+                                                        int32_t /* index */,
+                                                        uint32_t* /* dimensions */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_getOutputOperandDimensions");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_getOutputOperandDimensions unimplemented in Neural "
+                  "Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Records |buffer| as the data source for input |index|. The buffer is not
+// handed to the interpreter here; it is stashed in the ExecutionContext maps
+// for use at compute time.
+int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index,
+                                      const ANeuralNetworksOperandType* type, const void* buffer,
+                                      size_t length) {
+    NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "ANeuralNetworksExecution_setInput");
+    // We do not support dynamic shapes
+    if (type != nullptr) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setInput expected a nullptr for "
+                      "ANeuralNetworksOperandType* argument";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    if (!execution || (!buffer && length != 0)) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setInput passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    auto context = reinterpret_cast<ExecutionContext*>(execution);
+    if (index < 0 || index >= static_cast<int32_t>(context->interpreter->inputs().size())) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setInput index out of bounds";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+
+    // NOTE(review): unlike setOutput, |length| is compared directly (no
+    // max(length, 1) clamp), so a zero-length input is always rejected here —
+    // confirm this asymmetry is intentional.
+    if (context->interpreter->input_tensor(index)->bytes != length) {
+        LOG(ERROR)
+                << "ANeuralNetworksExecution_setInput input bytes is different from buffer length";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    context->inputs[index] = buffer;
+    context->inputSizes[index] = length;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Stub: memory-backed inputs are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* /* execution */,
+                                                int32_t /* index */,
+                                                const ANeuralNetworksOperandType* /* type */,
+                                                const ANeuralNetworksMemory* /* memory */,
+                                                size_t /* offset */, size_t /* length */) {
+    NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "ANeuralNetworksExecution_setInputFromMemory");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_setInputFromMemory unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Binds |buffer| as the destination for output |index|. Aligned buffers are
+// handed to the interpreter as custom allocations (zero-copy); misaligned
+// buffers are stashed for a copy-out after compute.
+int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
+                                       const ANeuralNetworksOperandType* type, void* buffer,
+                                       size_t length) {
+    NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "ANeuralNetworksExecution_setOutput");
+    // We do not support dynamic shapes
+    if (type != nullptr) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput expected a nullptr for "
+                      "ANeuralNetworksOperandType* argument";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+
+    if (!execution || (!buffer && length != 0)) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput passed a nullptr ";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    auto context = reinterpret_cast<ExecutionContext*>(execution);
+    if (index < 0 || index >= static_cast<int32_t>(context->interpreter->outputs().size())) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput index out of bounds";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+
+    // A zero-length buffer is treated as size 1 for the tensor-size check.
+    const size_t bufferSize = std::max<size_t>(length, 1);
+    if (bufferSize != context->interpreter->output_tensor(index)->bytes) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput length is not equal to the output tensor "
+                      "size";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+
+    const intptr_t dataPtrValue = reinterpret_cast<intptr_t>(buffer);
+    if (dataPtrValue % tflite::kDefaultTensorAlignment != 0) {
+        // Misaligned: remember the buffer so results can be copied out later.
+        context->outputs[index] = buffer;
+        context->outputSizes[index] = bufferSize;
+    } else {
+        TfLiteCustomAllocation allocation = {.data = buffer, .bytes = bufferSize};
+        // Bug fix: the TfLiteStatus returned by SetCustomAllocationForTensor was
+        // silently ignored, so a failed registration would drop the output. On
+        // failure, fall back to the copy-out path used for misaligned buffers.
+        if (context->interpreter->SetCustomAllocationForTensor(
+                    context->interpreter->outputs()[index], allocation,
+                    kTfLiteCustomAllocationFlagsNone) != kTfLiteOk) {
+            context->outputs[index] = buffer;
+            context->outputSizes[index] = bufferSize;
+        }
+    }
+
+    context->isOutputSpecifiedAtIndex[index] = true;
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Stub: memory-backed outputs are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* /* execution */,
+                                                 int32_t /* index */,
+                                                 const ANeuralNetworksOperandType* /* type */,
+                                                 const ANeuralNetworksMemory* /* memory */,
+                                                 size_t /* offset */, size_t /* length */) {
+    NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "ANeuralNetworksExecution_setOutputFromMemory");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR)
+            << "ANeuralNetworksExecution_setOutputFromMemory unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: asynchronous compute is unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* /* execution */,
+                                          ANeuralNetworksEvent** /* event */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_startCompute");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_startCompute unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: execution timeouts are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* /* execution */,
+                                        uint64_t /* duration */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_setTimeout");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_setTimeout unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Blocks until the event signals, then converts the resulting ErrorStatus
+// into an ANEURALNETWORKS_* result code.
+int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksEvent_wait");
+    if (event == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksEvent_wait passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    IEvent* e = reinterpret_cast<IEvent*>(event);
+    return convertErrorStatusToResultCode(e->wait());
+}
+
+// Waits for any in-flight work tied to the event, then destroys it.
+void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksEvent_free");
+    // Freeing a null event is a valid no-op.
+    if (event == nullptr) return;
+    IEvent* e = reinterpret_cast<IEvent*>(event);
+    // Block until the event signals before releasing it.
+    e->wait();
+    delete e;
+}
+
+// Stub: WHILE-loop timeouts are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* /* execution */,
+                                            uint64_t /* duration */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_setLoopTimeout");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_setLoopTimeout unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Returns the default WHILE-loop timeout (ns) from the operation_while constants.
+uint64_t ANeuralNetworks_getDefaultLoopTimeout() {
+    return operation_while::kTimeoutNsDefault;
+}
+
+// Returns the maximum WHILE-loop timeout (ns) from the operation_while constants.
+uint64_t ANeuralNetworks_getMaximumLoopTimeout() {
+    return operation_while::kTimeoutNsMaximum;
+}
+
+// Reports whether |device| lists |extensionName| among its supported
+// extensions; result is written to |isExtensionSupported|.
+int ANeuralNetworksDevice_getExtensionSupport(const ANeuralNetworksDevice* device,
+                                              const char* extensionName,
+                                              bool* isExtensionSupported) {
+    if (device == nullptr || extensionName == nullptr || isExtensionSupported == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksDevice_getExtensionSupport passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+
+    // Linear scan of the device's extension list for a matching name.
+    const Device* d = reinterpret_cast<const Device*>(device);
+    *isExtensionSupported = false;
+    for (const auto& extension : d->getSupportedExtensions()) {
+        if (extension.name == extensionName) {
+            *isExtensionSupported = true;
+            break;
+        }
+    }
+
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Resolves an extension operand code to a model-local operand type via
+// FlatbufferModelBuilder::getExtensionType.
+int ANeuralNetworksModel_getExtensionOperandType(ANeuralNetworksModel* model,
+                                                 const char* extensionName,
+                                                 uint16_t operandCodeWithinExtension,
+                                                 int32_t* type) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_getExtensionOperandType");
+    if (!model || !extensionName || !type) {
+        LOG(ERROR) << "ANeuralNetworksModel_getExtensionOperandType passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    FlatbufferModelBuilder* m = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return m->getExtensionType(extensionName, operandCodeWithinExtension, type);
+}
+
+// Resolves an extension operation code to a model-local operation type via
+// FlatbufferModelBuilder::getExtensionType (shared with operand lookup).
+int ANeuralNetworksModel_getExtensionOperationType(ANeuralNetworksModel* model,
+                                                   const char* extensionName,
+                                                   uint16_t operationCodeWithinExtension,
+                                                   ANeuralNetworksOperationType* type) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_getExtensionOperationType");
+    if (!model || !extensionName || !type) {
+        LOG(ERROR) << "ANeuralNetworksModel_getExtensionOperationType passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    FlatbufferModelBuilder* m = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return m->getExtensionType(extensionName, operationCodeWithinExtension, type);
+}
+
+// Attaches extension-specific data to the operand at |index|; a null |data|
+// is allowed only when |length| is zero.
+int ANeuralNetworksModel_setOperandExtensionData(ANeuralNetworksModel* model, int32_t index,
+                                                 const void* data, size_t length) {
+    NNTRACE_RT(NNTRACE_PHASE_PREPARATION, "ANeuralNetworksModel_setOperandExtensionData");
+    if (!model || (!data && length != 0)) {
+        LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    FlatbufferModelBuilder* m = reinterpret_cast<FlatbufferModelBuilder*>(model);
+    return m->setOperandExtensionData(index, data, length);
+}
+
+// Stub: compilation extension attributes are unimplemented in NNAPI v2; logs
+// and returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_addExtensionAttribute(ANeuralNetworksCompilation* /* compilation */,
+                                                     const char* /* extensionName */,
+                                                     uint16_t /* attributeCodeWithinExtension */,
+                                                     const void* /* data */, size_t /* length */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "ANeuralNetworksCompilation_addExtensionAttribute");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_addExtensionAttribute unimplemented in Neural "
+                  "Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: execution extension attributes are unimplemented in NNAPI v2; logs
+// and returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_addExtensionAttribute(ANeuralNetworksExecution* /* execution */,
+                                                   const char* /* extensionName */,
+                                                   uint16_t /* attributeCodeWithinExtension */,
+                                                   const void* /* data */, size_t /* length */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_addExtensionAttribute");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR)
+            << "ANeuralNetworksExecution_addExtensionAttribute unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Wraps an existing sync-fence fd in a SyncFenceEvent and returns it as an
+// opaque event handle owned by the caller.
+int ANeuralNetworksEvent_createFromSyncFenceFd(int syncFenceFd, ANeuralNetworksEvent** event) {
+    if (event == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksEvent_createFromSyncFenceFd passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    // NOTE(review): fd 0 is treated as invalid here (<= 0), matching the
+    // getSyncFenceFd check below — confirm fd 0 can never be a valid fence.
+    if (syncFenceFd <= 0) {
+        LOG(ERROR) << "ANeuralNetworksEvent_createFromSyncFenceFd passed an invalid fd: "
+                   << syncFenceFd;
+        *event = nullptr;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    std::unique_ptr<SyncFenceEvent> e =
+            std::make_unique<SyncFenceEvent>(syncFenceFd, nullptr, nullptr);
+    *event = reinterpret_cast<ANeuralNetworksEvent*>(e.release());
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Returns a dup of the event's sync-fence fd in |*syncFenceFd|; on any
+// failure |*syncFenceFd| is left as -1.
+int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* syncFenceFd) {
+    if (syncFenceFd == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksEvent_getSyncFenceFd passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    // Initialize the out-param before any further validation can bail out.
+    *syncFenceFd = -1;
+    if (event == nullptr) {
+        LOG(ERROR) << "ANeuralNetworksEvent_getSyncFenceFd passed a nullptr";
+        return ANEURALNETWORKS_UNEXPECTED_NULL;
+    }
+    const IEvent* e = reinterpret_cast<const IEvent*>(event);
+    // The client owns the dupped fd, and is responsible for closing it.
+    *syncFenceFd = e->getSyncFenceFd(/*shouldDup*/ true);
+    if (*syncFenceFd <= 0) {
+        LOG(ERROR) << "ANeuralNetworksEvent_getSyncFenceFd unable to get valid sync_fence fd";
+        *syncFenceFd = -1;
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Stub: fenced compute is unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_startComputeWithDependencies(
+        ANeuralNetworksExecution* /* execution */,
+        const ANeuralNetworksEvent* const* /* dependencies */, uint32_t /* numOfDependencies */,
+        uint64_t /* duration */, ANeuralNetworksEvent** /* event */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_startComputeWithDependencies");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_startComputeWithDependencies unimplemented in Neural "
+                  "Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Reports the runtime's NNAPI feature level via the shared implementation.
+int64_t ANeuralNetworks_getRuntimeFeatureLevel() {
+    return getRuntimeFeatureLevelImpl();
+}
+
+// Stub: padded buffers are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* /* execution */,
+                                                         bool /* enable */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_enableInputAndOutputPadding");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_enableInputAndOutputPadding unimplemented in Neural "
+                  "Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: preferred alignment queries are unimplemented in NNAPI v2; logs and
+// returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
+        const ANeuralNetworksCompilation* /* compilation */, uint32_t /* index */,
+        uint32_t* /* alignment */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput unimplemented in "
+                  "Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: preferred padding queries are unimplemented in NNAPI v2; logs and
+// returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
+        const ANeuralNetworksCompilation* /* compilation */, uint32_t /* index */,
+        uint32_t* /* padding */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput unimplemented in "
+                  "Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: preferred alignment queries are unimplemented in NNAPI v2; logs and
+// returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
+        const ANeuralNetworksCompilation* /* compilation */, uint32_t /* index */,
+        uint32_t* /* alignment */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR)
+            << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput unimplemented in "
+               "Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: preferred padding queries are unimplemented in NNAPI v2; logs and
+// returns ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
+        const ANeuralNetworksCompilation* /* compilation */, uint32_t /* index */,
+        uint32_t* /* padding */) {
+    NNTRACE_RT(NNTRACE_PHASE_COMPILATION,
+               "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput unimplemented in "
+                  "Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+// Stub: reusable executions are unimplemented in NNAPI v2; logs and returns
+// ANEURALNETWORKS_OP_FAILED unconditionally.
+int ANeuralNetworksExecution_setReusable(ANeuralNetworksExecution* /* execution */,
+                                         bool /* reusable */) {
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_setReusable");
+    // Not supported yet in NNAPI v2
+    LOG(ERROR) << "ANeuralNetworksExecution_setReusable unimplemented in Neural Networks V2";
+    return ANEURALNETWORKS_OP_FAILED;
+}
diff --git a/runtime/operation_converters/AddOperationConverter.cpp b/runtime/operation_converters/AddOperationConverter.cpp
new file mode 100644
index 000000000..4203a734a
--- /dev/null
+++ b/runtime/operation_converters/AddOperationConverter.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AddOperationConverter.h"
+
+#include <vector>
+
+#include "OperationConverterResolver.h"
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+// Translates an NNAPI ADD operation into a TFLite ADD operator and appends it
+// to the subgraph being assembled in |context|.
+Result<void> AddOperationConverter::convert(const Operation& operation,
+                                            SubGraphContext* context) const {
+    const Model::Subgraph* subgraph = context->getSubgraph();
+
+    // Register the ADD opcode (returns the existing index if already present).
+    const uint32_t opCodeIdx = context->addOpCode(OperationType::ADD);
+
+    std::vector<int32_t> inputs = NN_TRY(getArithmeticInputs(operation, context));
+    std::vector<int32_t> outputs = NN_TRY(getArithmeticOutputs(operation, context));
+
+    // Scalar options begin immediately after the two tensor inputs.
+    constexpr int kBaseOptionsIdx = 2;
+
+    // The fused activation operand must be a compile-time constant.
+    const Operand& activationOperand =
+            subgraph->operands[operation.inputs[kBaseOptionsIdx + kActivationOffset]];
+    NN_RET_CHECK(isOperandConstant(activationOperand));
+    const auto activation = static_cast<FusedActivationFunc>(
+            context->getConstantScalar<int32_t>(activationOperand));
+
+    const auto addOptions = tflite::CreateAddOptions(
+            context->getBuilder(),
+            NN_TRY(getTfliteActivation(activation)) /* fused_activation_function */);
+    const auto addOperator = tflite::CreateOperatorDirect(
+            context->getBuilder(), opCodeIdx /* opcode_index */, &inputs, &outputs,
+            tflite::BuiltinOptions::BuiltinOptions_AddOptions /* builtin_options_type */,
+            addOptions.Union() /* builtin_options */);
+    context->addOperatorFlatbuffer(addOperator);
+
+    return {};
+}
+
+NN_REGISTER_OPERATION_CONVERTER(ADD, AddOperationConverter);
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/AddOperationConverter.h b/runtime/operation_converters/AddOperationConverter.h
new file mode 100644
index 000000000..ab82bfb54
--- /dev/null
+++ b/runtime/operation_converters/AddOperationConverter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_ADD_OPERATION_CONVERTER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_ADD_OPERATION_CONVERTER_H
+
+#include <vector>
+
+#include "ArithmeticOperationConverter.h"
+
+namespace android {
+namespace nn {
+
+// Converts an NNAPI ADD operation into its TFLite ADD operator equivalent,
+// reusing the input/output helpers from ArithmeticOperationConverterBase.
+class AddOperationConverter : public ArithmeticOperationConverterBase {
+   public:
+    Result<void> convert(const Operation& operation, SubGraphContext* context) const override;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_ADD_OPERATION_CONVERTER_H \ No newline at end of file
diff --git a/runtime/operation_converters/ArithmeticOperationConverter.cpp b/runtime/operation_converters/ArithmeticOperationConverter.cpp
new file mode 100644
index 000000000..e97a302b1
--- /dev/null
+++ b/runtime/operation_converters/ArithmeticOperationConverter.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ArithmeticOperationConverter.h"
+
+#include <vector>
+
+#include "OperationConverterResolver.h"
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+// Ensures both input operands have TFLite tensors, then returns their
+// subgraph tensor indices in operand order.
+Result<std::vector<int32_t>> ArithmeticOperationConverterBase::getArithmeticInputs(
+        const Operation& operation, SubGraphContext* context) const {
+    const uint32_t firstOperand = operation.inputs[kInput1TensorIdx];
+    const uint32_t secondOperand = operation.inputs[kInput2TensorIdx];
+    NN_TRY(context->createTensorFlatbufferFromOperand(firstOperand));
+    NN_TRY(context->createTensorFlatbufferFromOperand(secondOperand));
+    return std::vector<int32_t>{context->getTensorIdxFromOperandIdx(firstOperand),
+                                context->getTensorIdxFromOperandIdx(secondOperand)};
+}
+
+// Ensures the single output operand has a TFLite tensor and returns its
+// subgraph tensor index.
+Result<std::vector<int32_t>> ArithmeticOperationConverterBase::getArithmeticOutputs(
+        const Operation& operation, SubGraphContext* context) const {
+    const uint32_t outputOperand = operation.outputs[kOutputTensorIdx];
+    NN_TRY(context->createTensorFlatbufferFromOperand(outputOperand));
+    return std::vector<int32_t>{context->getTensorIdxFromOperandIdx(outputOperand)};
+}
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/ArithmeticOperationConverter.h b/runtime/operation_converters/ArithmeticOperationConverter.h
new file mode 100644
index 000000000..d5dbcf680
--- /dev/null
+++ b/runtime/operation_converters/ArithmeticOperationConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_ARITHMETIC_OPERATION_CONVERTER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_ARITHMETIC_OPERATION_CONVERTER_H
+
+#include <vector>
+
+#include "OperationConverter.h"
+
+namespace android {
+namespace nn {
+
+// Shared base for converters of binary arithmetic NNAPI operations (e.g. ADD)
+// whose first two inputs are tensors and whose options follow them; provides
+// helpers that map those operands to TFLite tensor indices.
+class ArithmeticOperationConverterBase : public IOperationConverter {
+   protected:
+    // Creates TFLite tensors for the two input operands and returns their indices.
+    Result<std::vector<int32_t>> getArithmeticInputs(const Operation& operation,
+                                                     SubGraphContext* context) const;
+    // Creates a TFLite tensor for the output operand and returns its index.
+    Result<std::vector<int32_t>> getArithmeticOutputs(const Operation& operation,
+                                                      SubGraphContext* context) const;
+
+    // Offset locations of BuiltinOption parameters in NNAPI Operand inputs
+    static constexpr int kActivationOffset = 0;
+
+   private:
+    // Locations of Operator inputs in a NNAPI Operation
+    static constexpr int kInput1TensorIdx = 0;
+    static constexpr int kInput2TensorIdx = 1;
+
+    // Location of Operator outputs in a NNAPI Operation
+    static constexpr int kOutputTensorIdx = 0;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_ARITHMETIC_OPERATION_CONVERTER_H \ No newline at end of file
diff --git a/runtime/operation_converters/Conv2DOperationConverter.cpp b/runtime/operation_converters/Conv2DOperationConverter.cpp
new file mode 100644
index 000000000..c88ff056c
--- /dev/null
+++ b/runtime/operation_converters/Conv2DOperationConverter.cpp
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conv2DOperationConverter.h"
+
+#include <vector>
+
+#include "OperationConverterResolver.h"
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+Result<std::vector<int32_t>> Conv2DOperationConverter::getConv2DInputs(
+ const Operation& operation, SubGraphContext* context) const {
+ NN_RET_CHECK(isOperandConstant(
+ context->getSubgraph()->operands[operation.inputs[kFilterTensorIdx]]));
+
+ NN_TRY(context->createTensorFlatbufferFromOperand(operation.inputs[kInputTensorIdx]));
+ // TFLite does not support asymmetric tensors for convolution filters
+ NN_TRY(context->createTensorFlatbufferFromOperand(operation.inputs[kFilterTensorIdx],
+ true /* makeSymmetric */));
+ NN_TRY(context->createTensorFlatbufferFromOperand(operation.inputs[kBiasTensorIdx]));
+ std::vector<int32_t> inputs{
+ context->getTensorIdxFromOperandIdx(operation.inputs[kInputTensorIdx]),
+ context->getTensorIdxFromOperandIdx(operation.inputs[kFilterTensorIdx]),
+ context->getTensorIdxFromOperandIdx(operation.inputs[kBiasTensorIdx])};
+ return inputs;
+}
+
+Result<std::vector<int32_t>> Conv2DOperationConverter::getConv2DOutputs(
+ const Operation& operation, SubGraphContext* context) const {
+ NN_TRY(context->createTensorFlatbufferFromOperand(operation.outputs[kOutputTensorIdx]));
+ std::vector<int32_t> outputs{
+ context->getTensorIdxFromOperandIdx(operation.outputs[kOutputTensorIdx])};
+ return outputs;
+}
+
+Result<int> Conv2DOperationConverter::decomposeExplicitPadding(const Operation& operation,
+ SubGraphContext* context) const {
+ const Model::Subgraph* subgraph = context->getSubgraph();
+ const Operand& inputOperand = subgraph->operands[operation.inputs[0]];
+
+ // add opcode for PAD if it does not exist yet
+ uint32_t opCodeIdx = context->addOpCode(OperationType::PAD);
+
+ // pad options
+ auto padOptionsFlatbuffer = tflite::CreatePadOptions(context->getBuilder());
+
+ // check to make sure padding Operands are constants
+ const Operand& frontWidthPaddingOperand = subgraph->operands[operation.inputs[3]];
+ const Operand& backWidthPaddingOperand = subgraph->operands[operation.inputs[4]];
+ const Operand& frontHeightPaddingOperand = subgraph->operands[operation.inputs[5]];
+ const Operand& backHeightPaddingOperand = subgraph->operands[operation.inputs[6]];
+ NN_RET_CHECK(isOperandConstant(frontWidthPaddingOperand));
+ NN_RET_CHECK(isOperandConstant(backWidthPaddingOperand));
+ NN_RET_CHECK(isOperandConstant(frontHeightPaddingOperand));
+ NN_RET_CHECK(isOperandConstant(backHeightPaddingOperand));
+
+ // get padding params
+ int32_t frontHeightPadding = context->getConstantScalar<int32_t>(frontHeightPaddingOperand);
+ int32_t backHeightPadding = context->getConstantScalar<int32_t>(backHeightPaddingOperand);
+ int32_t frontWidthPadding = context->getConstantScalar<int32_t>(frontWidthPaddingOperand);
+ int32_t backWidthPadding = context->getConstantScalar<int32_t>(backWidthPaddingOperand);
+
+ // build padding buffer
+ const Dimensions& dims = inputOperand.dimensions;
+ int numDimensionsInput = static_cast<int>(dims.size());
+ std::vector<int32_t> paddingData(numDimensionsInput * 2, 0);
+ paddingData[2] = frontHeightPadding;
+ paddingData[3] = backHeightPadding;
+ paddingData[4] = frontWidthPadding;
+ paddingData[5] = backWidthPadding;
+ uint32_t paddingBufferIdx = context->addBufferFromData(
+ reinterpret_cast<uint8_t*>(paddingData.data()), paddingData.size() * sizeof(int32_t));
+
+ // create new tensor for padding
+ std::vector<int32_t> padShape{numDimensionsInput, 2};
+ auto padTensor = tflite::CreateTensorDirect(context->getBuilder(), &padShape /* shape */,
+ tflite::TensorType::TensorType_INT32 /* type */,
+ paddingBufferIdx /* buffer */);
+ int padTensorIdx = context->addTensorFlatbuffer(padTensor);
+
+ // add inputs for padding operation
+ std::vector<int32_t> padInputs = {context->getTensorIdxFromOperandIdx(operation.inputs[0]),
+ padTensorIdx};
+
+ // get dimensions of output of pad operation
+ std::vector<int32_t> padToConv2dShape(dims.begin(), dims.end());
+ // keep unknown height and width dimensions unknown
+ padToConv2dShape[1] = padToConv2dShape[1] != 0
+ ? frontHeightPadding + padToConv2dShape[1] + backHeightPadding
+ : -1;
+ padToConv2dShape[2] = padToConv2dShape[2] != 0
+ ? frontWidthPadding + padToConv2dShape[2] + backWidthPadding
+ : -1;
+ replaceZeroDimensions(&padToConv2dShape);
+
+ // build quantization parameters
+ std::vector<float> scaleVector{inputOperand.scale};
+ std::vector<int64_t> zeroPointVector{inputOperand.zeroPoint};
+ // min and max used to convert TFLite models to TF models, so it is unused in this case and can
+ // be set to 0
+ std::vector<float> minVector{0};
+ std::vector<float> maxVector{0};
+ auto quantizationParams = tflite::CreateQuantizationParametersDirect(
+ context->getBuilder(), &minVector /* min */, &maxVector /* max */,
+ &scaleVector /* scale */, &zeroPointVector /* zero_point */,
+ tflite::QuantizationDetails::QuantizationDetails_NONE /* details_type */);
+
+ // create new tensor to be output of pad & input for conv2d
+ auto padToConv2dTensor = tflite::CreateTensorDirect(
+ context->getBuilder(), &padToConv2dShape /* shape */,
+ NN_TRY(getTensorFlatbufferOperandType(inputOperand.type)) /* type */, 0 /* buffer */,
+ 0 /* name */, quantizationParams /* quantization */);
+ int padToConv2dTensorIdx = context->addTensorFlatbuffer(padToConv2dTensor);
+
+ // set output for padding operation and add to operators
+ std::vector<int32_t> padOutputs{padToConv2dTensorIdx};
+
+ OperatorFlatbuffer padOp = tflite::CreateOperatorDirect(
+ context->getBuilder(), opCodeIdx, &padInputs, &padOutputs,
+ tflite::BuiltinOptions::BuiltinOptions_PadOptions, padOptionsFlatbuffer.Union());
+ context->addOperatorFlatbuffer(padOp);
+
+ // Return tensor index of pad output created
+ return padToConv2dTensorIdx;
+}
+
+Result<void> Conv2DOperationConverter::convert(const Operation& operation,
+ SubGraphContext* context) const {
+ const Model::Subgraph* subgraph = context->getSubgraph();
+
+ // add opcode for CONV_2D if not added yet
+ uint32_t opCodeIdx = context->addOpCode(OperationType::CONV_2D);
+
+ // if there are less than 8 inputs or the input at the 7th index is a BOOL, there is implicit
+ // padding
+ bool isImplicitPadding = false;
+ if (operation.inputs.size() < 8 ||
+ subgraph->operands[operation.inputs[7]].type == OperandType::BOOL) {
+ isImplicitPadding = true;
+ }
+
+ std::vector<int32_t> inputs = NN_TRY(getConv2DInputs(operation, context));
+ std::vector<int32_t> outputs = NN_TRY(getConv2DOutputs(operation, context));
+
+ // if explicit padding, we need to decompose the operation to a separate padding op and a conv2d
+ // op
+ if (!isImplicitPadding) {
+ auto padOpIdx = NN_TRY(decomposeExplicitPadding(operation, context));
+ inputs[0] = padOpIdx;
+ }
+
+ int baseOptionsIdx = 4;
+ tflite::Padding padding;
+ if (isImplicitPadding) {
+ const Operand& paddingTypeOperand = subgraph->operands[operation.inputs[3]];
+ NN_RET_CHECK(isOperandConstant(paddingTypeOperand));
+
+ int32_t paddingType = context->getConstantScalar<int32_t>(paddingTypeOperand);
+ padding = getTFLitePadding(paddingType);
+ } else {
+ padding = tflite::Padding::Padding_VALID;
+ baseOptionsIdx = 7;
+ }
+
+ // check if stride and activation Operands are constant
+ const Operand& strideWOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kStrideWOffset]];
+ const Operand& strideHOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kStrideHOffset]];
+ const Operand& activationOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kActivationOffset]];
+ NN_RET_CHECK(isOperandConstant(strideWOperand));
+ NN_RET_CHECK(isOperandConstant(strideHOperand));
+ NN_RET_CHECK(isOperandConstant(activationOperand));
+
+ // get strides and activation
+ int32_t strideW = context->getConstantScalar<int32_t>(strideWOperand);
+ int32_t strideH = context->getConstantScalar<int32_t>(strideHOperand);
+ FusedActivationFunc activation = static_cast<FusedActivationFunc>(
+ context->getConstantScalar<int32_t>(activationOperand));
+
+ // check for nchw
+ int isNchwIdx = baseOptionsIdx + kIsNchwOffset;
+ if (operation.inputs.size() > static_cast<uint32_t>(isNchwIdx)) {
+ const Operand& isNchwOperand = subgraph->operands[operation.inputs[isNchwIdx]];
+ NN_RET_CHECK(isOperandConstant(isNchwOperand));
+
+ bool isNchw = context->getConstantScalar<bool>(isNchwOperand);
+ NN_RET_CHECK(!isNchw) << "TFLite does not support NCHW formatted input tensors";
+ }
+
+ // dilations
+ int dilationWIdx = baseOptionsIdx + kDilationWOffset;
+ int dilationHIdx = baseOptionsIdx + kDilationHOffset;
+ // default dilation factors are 1
+ int32_t dilationW = 1;
+ int32_t dilationH = 1;
+ if (operation.inputs.size() > static_cast<uint32_t>(dilationWIdx)) {
+ const Operand& dilationWOperand = subgraph->operands[operation.inputs[dilationWIdx]];
+ NN_RET_CHECK(isOperandConstant(dilationWOperand));
+
+ dilationW = context->getConstantScalar<int32_t>(dilationWOperand);
+ }
+ if (operation.inputs.size() > static_cast<uint32_t>(dilationHIdx)) {
+ const Operand& dilationHOperand = subgraph->operands[operation.inputs[dilationHIdx]];
+ NN_RET_CHECK(isOperandConstant(dilationHOperand));
+
+ dilationH = context->getConstantScalar<int32_t>(dilationHOperand);
+ }
+
+ flatbuffers::Offset<tflite::Conv2DOptions> optionsFlatbuffer = tflite::CreateConv2DOptions(
+ context->getBuilder(), padding, strideW, strideH,
+ NN_TRY(getTfliteActivation(activation)) /* fused_activation_function */, dilationW,
+ dilationH);
+ auto operatorFlatbuffer = tflite::CreateOperatorDirect(
+ context->getBuilder() /* builder */, opCodeIdx /* opcode_index */, &inputs /* inputs */,
+ &outputs /* outputs */,
+ tflite::BuiltinOptions::BuiltinOptions_Conv2DOptions /* builtin_options_type */,
+ optionsFlatbuffer.Union() /* builtin_options */);
+ context->addOperatorFlatbuffer(operatorFlatbuffer);
+
+ return {};
+}
+
+NN_REGISTER_OPERATION_CONVERTER(CONV_2D, Conv2DOperationConverter);
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/Conv2DOperationConverter.h b/runtime/operation_converters/Conv2DOperationConverter.h
new file mode 100644
index 000000000..398aaffd9
--- /dev/null
+++ b/runtime/operation_converters/Conv2DOperationConverter.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_CONV2D_OPERATION_CONVERTER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_CONV2D_OPERATION_CONVERTER_H
+
+#include <vector>
+
+#include "OperationConverter.h"
+
+namespace android {
+namespace nn {
+
+class Conv2DOperationConverter : public IOperationConverter {
+ public:
+ Result<void> convert(const Operation& operation, SubGraphContext* context) const override;
+
+ protected:
+ Result<std::vector<int32_t>> getConv2DInputs(const Operation& operation,
+ SubGraphContext* context) const;
+ Result<std::vector<int32_t>> getConv2DOutputs(const Operation& operation,
+ SubGraphContext* context) const;
+
+ // Returns the output Tensor index of created Padding Operator if successful
+ Result<int> decomposeExplicitPadding(const Operation& operation,
+ SubGraphContext* context) const;
+
+ private:
+ // Offset locations of BuiltinOption parameters in NNAPI Operand inputs
+ static constexpr int kStrideWOffset = 0;
+ static constexpr int kStrideHOffset = 1;
+ static constexpr int kActivationOffset = 2;
+ static constexpr int kIsNchwOffset = 3;
+ static constexpr int kDilationWOffset = 4;
+ static constexpr int kDilationHOffset = 5;
+
+ // Locations of Operator inputs in a NNAPI Operation
+ static constexpr int kInputTensorIdx = 0;
+ static constexpr int kFilterTensorIdx = 1;
+ static constexpr int kBiasTensorIdx = 2;
+
+ // Location of Operator outputs in a NNAPI Operation
+ static constexpr int kOutputTensorIdx = 0;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_CONV2D_OPERATION_CONVERTER_H \ No newline at end of file
diff --git a/runtime/operation_converters/DepthwiseConv2DOperationConverter.cpp b/runtime/operation_converters/DepthwiseConv2DOperationConverter.cpp
new file mode 100644
index 000000000..eb0e3b578
--- /dev/null
+++ b/runtime/operation_converters/DepthwiseConv2DOperationConverter.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DepthwiseConv2DOperationConverter.h"
+
+#include <vector>
+
+#include "OperationConverterResolver.h"
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+Result<void> DepthwiseConv2DOperationConverter::convert(const Operation& operation,
+ SubGraphContext* context) const {
+ const Model::Subgraph* subgraph = context->getSubgraph();
+
+ // add opcode for DEPTHWISE_CONV_2D if not added yet
+ uint32_t opCodeIdx = context->addOpCode(OperationType::DEPTHWISE_CONV_2D);
+
+ // if there are less than 9 inputs or the input at the 8th index is a BOOL, there is implicit
+ // padding
+ const bool isImplicitPadding =
+ (operation.inputs.size() < 9 ||
+ subgraph->operands[operation.inputs[8]].type == OperandType::BOOL);
+
+ std::vector<int32_t> inputs = NN_TRY(getConv2DInputs(operation, context));
+ std::vector<int32_t> outputs = NN_TRY(getConv2DOutputs(operation, context));
+
+ // if explicit padding, we need to decompose the operation to a separate padding op and a conv2d
+ // op
+ if (!isImplicitPadding) {
+ auto padOpIdx = NN_TRY(decomposeExplicitPadding(operation, context));
+ inputs[0] = padOpIdx;
+ }
+
+ int baseOptionsIdx = 4;
+ tflite::Padding padding;
+ if (isImplicitPadding) {
+ const Operand& paddingTypeOperand = subgraph->operands[operation.inputs[3]];
+ NN_RET_CHECK(isOperandConstant(paddingTypeOperand));
+
+ int32_t paddingType = context->getConstantScalar<int32_t>(paddingTypeOperand);
+ padding = getTFLitePadding(paddingType);
+ } else {
+ padding = tflite::Padding::Padding_VALID;
+ baseOptionsIdx = 7;
+ }
+
+ // check if stride, depthwise multiplier, and activation Operands are constant
+ const Operand& strideWOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kStrideWOffset]];
+ const Operand& strideHOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kStrideHOffset]];
+ const Operand& activationOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kActivationOffset]];
+ const Operand& depthwiseMultiplierOperand =
+ subgraph->operands[operation.inputs[baseOptionsIdx + kDepthwiseMultiplier]];
+ NN_RET_CHECK(isOperandConstant(strideWOperand));
+ NN_RET_CHECK(isOperandConstant(strideHOperand));
+ NN_RET_CHECK(isOperandConstant(activationOperand));
+ NN_RET_CHECK(isOperandConstant(depthwiseMultiplierOperand));
+
+ // get strides and activation
+ int32_t strideW = context->getConstantScalar<int32_t>(strideWOperand);
+ int32_t strideH = context->getConstantScalar<int32_t>(strideHOperand);
+ int32_t depthwiseMultiplier = context->getConstantScalar<int32_t>(depthwiseMultiplierOperand);
+ FusedActivationFunc activation = static_cast<FusedActivationFunc>(
+ context->getConstantScalar<int32_t>(activationOperand));
+
+ // check for nchw
+ int isNchwIdx = baseOptionsIdx + kIsNchwOffset;
+ if (operation.inputs.size() > static_cast<uint32_t>(isNchwIdx)) {
+ const Operand& isNchwOperand = subgraph->operands[operation.inputs[isNchwIdx]];
+ NN_RET_CHECK(isOperandConstant(isNchwOperand));
+
+ bool isNchw = context->getConstantScalar<bool>(isNchwOperand);
+ NN_RET_CHECK(!isNchw) << "TFLite does not support NCHW formatted input tensors";
+ }
+
+ // dilations
+ int dilationWIdx = baseOptionsIdx + kDilationWOffset;
+ int dilationHIdx = baseOptionsIdx + kDilationHOffset;
+ // default dilation factors are 1
+ int32_t dilationW = 1;
+ int32_t dilationH = 1;
+ if (operation.inputs.size() > static_cast<uint32_t>(dilationWIdx)) {
+ const Operand& dilationWOperand = subgraph->operands[operation.inputs[dilationWIdx]];
+ NN_RET_CHECK(isOperandConstant(dilationWOperand));
+
+ dilationW = context->getConstantScalar<int32_t>(dilationWOperand);
+ }
+ if (operation.inputs.size() > static_cast<uint32_t>(dilationHIdx)) {
+ const Operand& dilationHOperand = subgraph->operands[operation.inputs[dilationHIdx]];
+ NN_RET_CHECK(isOperandConstant(dilationHOperand));
+
+ dilationH = context->getConstantScalar<int32_t>(dilationHOperand);
+ }
+
+ flatbuffers::Offset<tflite::DepthwiseConv2DOptions> optionsFlatbuffer =
+ tflite::CreateDepthwiseConv2DOptions(
+ context->getBuilder(), padding, strideW, strideH, depthwiseMultiplier,
+ NN_TRY(getTfliteActivation(activation)) /* fused_activation_function */,
+ dilationW, dilationH);
+ auto operatorFlatbuffer = tflite::CreateOperatorDirect(
+ context->getBuilder() /* builder */, opCodeIdx /* opcode_index */, &inputs /* inputs */,
+ &outputs /* outputs */,
+ tflite::BuiltinOptions::
+ BuiltinOptions_DepthwiseConv2DOptions /* builtin_options_type */,
+ optionsFlatbuffer.Union() /* builtin_options */);
+ context->addOperatorFlatbuffer(operatorFlatbuffer);
+
+ return {};
+}
+
+NN_REGISTER_OPERATION_CONVERTER(DEPTHWISE_CONV_2D, DepthwiseConv2DOperationConverter);
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/DepthwiseConv2DOperationConverter.h b/runtime/operation_converters/DepthwiseConv2DOperationConverter.h
new file mode 100644
index 000000000..37302d7e3
--- /dev/null
+++ b/runtime/operation_converters/DepthwiseConv2DOperationConverter.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_DEPTHWISE_CONV2D_OPERATION_CONVERTER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_DEPTHWISE_CONV2D_OPERATION_CONVERTER_H
+
+#include <vector>
+
+#include "Conv2DOperationConverter.h"
+
+namespace android {
+namespace nn {
+
+class DepthwiseConv2DOperationConverter : public Conv2DOperationConverter {
+ public:
+ Result<void> convert(const Operation& operation, SubGraphContext* context) const override;
+
+ private:
+ // Offset locations of BuiltinOption parameters in NNAPI Operand inputs
+ static constexpr int kStrideWOffset = 0;
+ static constexpr int kStrideHOffset = 1;
+ static constexpr int kDepthwiseMultiplier = 2;
+ static constexpr int kActivationOffset = 3;
+ static constexpr int kIsNchwOffset = 4;
+ static constexpr int kDilationWOffset = 5;
+ static constexpr int kDilationHOffset = 6;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_DEPTHWISE_CONV2D_OPERATION_CONVERTER_H \ No newline at end of file
diff --git a/runtime/operation_converters/LogisticOperationConverter.cpp b/runtime/operation_converters/LogisticOperationConverter.cpp
new file mode 100644
index 000000000..20528f40c
--- /dev/null
+++ b/runtime/operation_converters/LogisticOperationConverter.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LogisticOperationConverter.h"
+
+#include <vector>
+
+#include "OperationConverterResolver.h"
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+Result<std::vector<int32_t>> LogisticOperationConverter::getLogisticInputs(
+ const Operation& operation, SubGraphContext* context) const {
+ NN_TRY(context->createTensorFlatbufferFromOperand(operation.inputs[kInputTensorIdx]));
+ std::vector<int32_t> inputs{
+ context->getTensorIdxFromOperandIdx(operation.inputs[kInputTensorIdx])};
+ return inputs;
+}
+
+Result<std::vector<int32_t>> LogisticOperationConverter::getLogisticOutputs(
+ const Operation& operation, SubGraphContext* context) const {
+ NN_TRY(context->createTensorFlatbufferFromOperand(operation.outputs[kOutputTensorIdx]));
+ std::vector<int32_t> outputs{
+ context->getTensorIdxFromOperandIdx(operation.outputs[kOutputTensorIdx])};
+ return outputs;
+}
+
+Result<void> LogisticOperationConverter::convert(const Operation& operation,
+ SubGraphContext* context) const {
+ // add opcode for LOGISTIC if not added yet
+ uint32_t opCodeIdx = context->addOpCode(OperationType::LOGISTIC);
+
+ std::vector<int32_t> inputs = NN_TRY(getLogisticInputs(operation, context));
+ std::vector<int32_t> outputs = NN_TRY(getLogisticOutputs(operation, context));
+
+ auto optionsFlatbuffer = tflite::CreateLogSoftmaxOptions(context->getBuilder());
+ auto operatorFlatbuffer = tflite::CreateOperatorDirect(
+ context->getBuilder() /* builder */, opCodeIdx /* opcode_index */, &inputs /* inputs */,
+ &outputs /* outputs */,
+ tflite::BuiltinOptions::BuiltinOptions_LogSoftmaxOptions /* builtin_options_type */,
+ optionsFlatbuffer.Union() /* builtin_options */);
+ context->addOperatorFlatbuffer(operatorFlatbuffer);
+
+ return {};
+}
+
+NN_REGISTER_OPERATION_CONVERTER(LOGISTIC, LogisticOperationConverter);
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/LogisticOperationConverter.h b/runtime/operation_converters/LogisticOperationConverter.h
new file mode 100644
index 000000000..dc8dcccc3
--- /dev/null
+++ b/runtime/operation_converters/LogisticOperationConverter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_LOGISTIC_OPERATION_CONVERTER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_LOGISTIC_OPERATION_CONVERTER_H
+
+#include <vector>
+
+#include "OperationConverter.h"
+
+namespace android {
+namespace nn {
+
+class LogisticOperationConverter : public IOperationConverter {
+ public:
+ Result<void> convert(const Operation& operation, SubGraphContext* context) const override;
+
+ private:
+ Result<std::vector<int32_t>> getLogisticInputs(const Operation& operation,
+ SubGraphContext* context) const;
+ Result<std::vector<int32_t>> getLogisticOutputs(const Operation& operation,
+ SubGraphContext* context) const;
+
+ // Location of Operator inputs in a NNAPI Operation
+ static constexpr int kInputTensorIdx = 0;
+
+ // Location of Operator outputs in a NNAPI Operation
+ static constexpr int kOutputTensorIdx = 0;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_LOGISTIC_OPERATION_CONVERTER_H \ No newline at end of file
diff --git a/runtime/operation_converters/OperationConverter.h b/runtime/operation_converters/OperationConverter.h
new file mode 100644
index 000000000..abe8d4a52
--- /dev/null
+++ b/runtime/operation_converters/OperationConverter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_OPERATION_CONVERTER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_OPERATION_CONVERTER_H
+
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+class IOperationConverter {
+ public:
+ virtual ~IOperationConverter() = default;
+
+ virtual Result<void> convert(const Operation& operation, SubGraphContext* context) const = 0;
+};
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_OPERATION_CONVERTER_H \ No newline at end of file
diff --git a/runtime/operation_converters/OperationConverterResolver.cpp b/runtime/operation_converters/OperationConverterResolver.cpp
new file mode 100644
index 000000000..530b95965
--- /dev/null
+++ b/runtime/operation_converters/OperationConverterResolver.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "OperationConverterResolver"
+
+#include "OperationConverterResolver.h"
+
+#include "OperationsUtils.h"
+
+namespace android {
+namespace nn {
+
+#define NN_FORWARD_DECLARE_OPERATION_CONVERTER_REGISTRATION_FUNCTION(opType) \
+ const IOperationConverter* registerConverter_##opType();
+
+NN_FOR_EACH_OPERATION(NN_FORWARD_DECLARE_OPERATION_CONVERTER_REGISTRATION_FUNCTION)
+
+#undef NN_FORWARD_DECLARE_OPERATION_CONVERTER_REGISTRATION_FUNCTION
+
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(AVERAGE_POOL_2D);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(CONCATENATION);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(DEPTH_TO_SPACE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(DEQUANTIZE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(EMBEDDING_LOOKUP);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(FLOOR);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(FULLY_CONNECTED);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(HASHTABLE_LOOKUP);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(L2_NORMALIZATION);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(L2_POOL_2D);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LOCAL_RESPONSE_NORMALIZATION);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LSH_PROJECTION);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LSTM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(MAX_POOL_2D);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(MUL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RELU);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RELU1);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RELU6);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RESHAPE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RESIZE_BILINEAR);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RNN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SOFTMAX);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SPACE_TO_DEPTH);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SVDF);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(TANH);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(BATCH_TO_SPACE_ND);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(DIV);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(MEAN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(PAD);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SPACE_TO_BATCH_ND);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SQUEEZE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(STRIDED_SLICE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SUB);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(TRANSPOSE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(ABS);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(ARGMAX);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(ARGMIN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(AXIS_ALIGNED_BBOX_TRANSFORM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(BIDIRECTIONAL_SEQUENCE_LSTM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(BIDIRECTIONAL_SEQUENCE_RNN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(BOX_WITH_NMS_LIMIT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(CAST);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(CHANNEL_SHUFFLE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(DENSIFY);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(DETECTION_POSTPROCESSING);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(EQUAL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(EXP);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(EXPAND_DIMS);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(GATHER);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(GENERATE_PROPOSALS);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(GREATER);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(GREATER_EQUAL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(GROUPED_CONV_2D);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(HEATMAP_MAX_KEYPOINT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(INSTANCE_NORMALIZATION);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LESS);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LESS_EQUAL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LOG);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LOGICAL_AND);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LOGICAL_NOT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LOGICAL_OR);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(LOG_SOFTMAX);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(MAXIMUM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(MINIMUM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(NEG);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(NOT_EQUAL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(PAD_V2);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(POW);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(PRELU);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(QUANTIZE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(QUANTIZED_16BIT_LSTM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RANDOM_MULTINOMIAL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REDUCE_ALL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REDUCE_ANY);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REDUCE_MAX);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REDUCE_MIN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REDUCE_PROD);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REDUCE_SUM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(ROI_ALIGN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(ROI_POOLING);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RSQRT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SELECT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SIN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SLICE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SPLIT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(SQRT);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(TILE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(TOPK_V2);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(TRANSPOSE_CONV_2D);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(UNIDIRECTIONAL_SEQUENCE_LSTM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(UNIDIRECTIONAL_SEQUENCE_RNN);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RESIZE_NEAREST_NEIGHBOR);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(QUANTIZED_LSTM);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(IF);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(WHILE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(ELU);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(HARD_SWISH);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(FILL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(RANK);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(BATCH_MATMUL);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(PACK);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(MIRROR_PAD);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(REVERSE);
+NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(OEM_OPERATION);
+
+OperationConverterResolver::OperationConverterResolver() {
+#define NN_REGISTER_OPERATION_CONVERTER_TO_RESOLVER(operationType) \
+ registerOperationConverter(registerConverter_##operationType(), OperationType::operationType);
+ NN_FOR_EACH_OPERATION(NN_REGISTER_OPERATION_CONVERTER_TO_RESOLVER)
+#undef NN_REGISTER_OPERATION_CONVERTER_TO_RESOLVER
+}
+
+const IOperationConverter* OperationConverterResolver::findOperationConverter(
+ OperationType operationType) const {
+ int32_t index = static_cast<int32_t>(operationType);
+ if (index >= 0 && index < kNumberOfOperationTypes) {
+ return mConverters[index];
+ }
+ return nullptr;
+}
+
+void OperationConverterResolver::registerOperationConverter(
+ const IOperationConverter* operationConverter, OperationType operationType) {
+ if (operationConverter == nullptr) {
+ return;
+ }
+
+ int32_t index = static_cast<int32_t>(operationType);
+ CHECK(mConverters[index] == nullptr);
+ mConverters[index] = operationConverter;
+}
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/OperationConverterResolver.h b/runtime/operation_converters/OperationConverterResolver.h
new file mode 100644
index 000000000..405779984
--- /dev/null
+++ b/runtime/operation_converters/OperationConverterResolver.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_OPERATION_CONVERTER_RESOLVER_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_OPERATION_CONVERTER_RESOLVER_H
+
+#include "OperationConverter.h"
+#include "SubGraphContext.h"
+
+namespace android {
+namespace nn {
+
+// OperationConverterResolver is used to register all operation converters that implement
+// IOperationConverter. This retrieves the correct converter to use based on OperationType
+class OperationConverterResolver {
+ public:
+ static const OperationConverterResolver* get() {
+ static OperationConverterResolver instance;
+ return &instance;
+ }
+ const IOperationConverter* findOperationConverter(OperationType operationType) const;
+
+ private:
+ OperationConverterResolver();
+
+ void registerOperationConverter(const IOperationConverter* operationConverter,
+ OperationType operationType);
+
+ const IOperationConverter* mConverters[kNumberOfOperationTypes] = {};
+};
+
+// Use to register operation converter into OperationConverterResolver
+#define NN_REGISTER_OPERATION_CONVERTER(identifier, OperationConverterClass) \
+ const IOperationConverter* registerConverter_##identifier() { \
+ static OperationConverterClass converter; \
+ return &converter; \
+ }
+
+// Use to indicate which operations are not supported
+#define NN_OPERATION_CONVERTER_NOT_IMPLEMENTED(identifier) \
+ const IOperationConverter* registerConverter_##identifier() { \
+ return nullptr; \
+ }
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_OPERATION_CONVERTER_RESOLVER_H \ No newline at end of file
diff --git a/runtime/operation_converters/SubGraphContext.cpp b/runtime/operation_converters/SubGraphContext.cpp
new file mode 100644
index 000000000..c4ccb50ed
--- /dev/null
+++ b/runtime/operation_converters/SubGraphContext.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SubGraphContext"
+
+#include "SubGraphContext.h"
+
+#include <limits>
+
+#include "FlatbufferModelBuilderUtils.h"
+
+namespace android {
+namespace nn {
+
+SubGraphContext::SubGraphContext(const Model* model, const Model::Subgraph* subgraph,
+ flatbuffers::FlatBufferBuilder* builder,
+ std::vector<OperatorCodeFlatbuffer>* opCodesVector,
+ std::vector<int>* opCodeIndexForOperationType,
+ std::vector<BufferFlatbuffer>* bufferVector)
+ : mModel(model),
+ mSubgraph(subgraph),
+ mBuilder(builder),
+ mOpCodesVector(opCodesVector),
+ mOpCodeIndexForOperationType(opCodeIndexForOperationType),
+ mBufferVector(bufferVector) {
+ CHECK(model != nullptr);
+ CHECK(subgraph != nullptr);
+ CHECK(opCodesVector != nullptr);
+ CHECK(opCodeIndexForOperationType != nullptr);
+ CHECK(bufferVector != nullptr);
+
+ mOperandToTensorIdx.resize(subgraph->operands.size(), -1);
+ mMappings.resize(model->pools.size());
+}
+
+SubGraphFlatbuffer SubGraphContext::finish() {
+ return tflite::CreateSubGraphDirect(*mBuilder, &mTensorVector, &mInputTensors, &mOutputTensors,
+ &mOperatorVector);
+}
+
+int SubGraphContext::addTensorFlatbuffer(TensorFlatbuffer tensor, int32_t operandIdx) {
+ mTensorVector.push_back(tensor);
+
+ int tensorIdx = mTensorVector.size() - 1;
+ if (operandIdx >= 0) {
+ CHECK(mOperandToTensorIdx[operandIdx] == -1);
+ mOperandToTensorIdx[operandIdx] = tensorIdx;
+ }
+ return tensorIdx;
+}
+
+void SubGraphContext::addOperatorFlatbuffer(OperatorFlatbuffer opFlatbuffer) {
+ mOperatorVector.push_back(opFlatbuffer);
+}
+
+void SubGraphContext::addSubGraphInput(int32_t operandIdx) {
+ CHECK(mOperandToTensorIdx[operandIdx] != -1);
+ mInputTensors.push_back(mOperandToTensorIdx[operandIdx]);
+}
+
+void SubGraphContext::addSubGraphOutput(int32_t operandIdx) {
+ CHECK(mOperandToTensorIdx[operandIdx] != -1);
+ mOutputTensors.push_back(mOperandToTensorIdx[operandIdx]);
+}
+
+uint32_t SubGraphContext::addOpCode(OperationType operationType) {
+ uint32_t idx = static_cast<uint32_t>(operationType);
+ if (mOpCodeIndexForOperationType->at(idx) != -1) {
+ return mOpCodeIndexForOperationType->at(idx);
+ }
+
+ OperatorCodeFlatbuffer opCode;
+
+ tflite::BuiltinOperator builtinCode = getFlatbufferOperator(operationType);
+ if (builtinCode < tflite::BuiltinOperator::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)
+ opCode = tflite::CreateOperatorCode(
+ *mBuilder, static_cast<int8_t>(builtinCode) /* deprecated_builtin_code */,
+ 0 /* custom_code */, getMaxOperatorVersionCode(builtinCode) /* version */);
+ else
+ opCode = tflite::CreateOperatorCode(*mBuilder, 0 /* deprecated_builtin_code */,
+ 0 /* custom_code */,
+ getMaxOperatorVersionCode(builtinCode) /* version */,
+ builtinCode /* builtin_code */);
+
+ mOpCodesVector->push_back(opCode);
+ uint32_t opCodeIdx = mOpCodesVector->size() - 1;
+ (*mOpCodeIndexForOperationType)[idx] = opCodeIdx;
+ return opCodeIdx;
+}
+
+int SubGraphContext::getTensorIdxFromOperandIdx(int operandIdx) const {
+ return mOperandToTensorIdx[operandIdx];
+}
+
+const Mapping& SubGraphContext::getMapping(uint32_t poolIndex) {
+ if (mMappings[poolIndex].size > 0) {
+ return mMappings[poolIndex];
+ }
+
+ SharedMemory memory = mModel->pools[poolIndex];
+ GeneralResult<Mapping> mapping = map(memory);
+ CHECK(mapping.has_value()) << "CONSTANT_REFERENCE memory mapping error: "
+ << mapping.error().message;
+
+ mMappings[poolIndex] = std::move(mapping).value();
+ return mMappings[poolIndex];
+}
+
+std::pair<const uint8_t*, uint32_t> SubGraphContext::getConstantPointerAndLength(
+ const Operand& operand) {
+ CHECK(isOperandConstant(operand));
+
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
+ return std::make_pair(mModel->operandValues.data() + operand.location.offset,
+ operand.location.length);
+ }
+
+ const Mapping& mapping = getMapping(operand.location.poolIndex);
+ const uint8_t* memoryPtr = static_cast<const uint8_t*>(
+ std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, mapping.pointer));
+
+ return std::make_pair(memoryPtr + operand.location.offset, operand.location.length);
+}
+
+uint32_t SubGraphContext::addBufferFromData(const uint8_t* data, uint32_t length) {
+ auto dataVectorFlatbuffer = mBuilder->CreateVector(data, length);
+
+ auto buffer = tflite::CreateBuffer(*mBuilder, dataVectorFlatbuffer);
+ mBufferVector->push_back(buffer);
+
+ return mBufferVector->size() - 1;
+}
+
+Result<void> SubGraphContext::createTensorFlatbufferFromOperand(uint32_t operandIdx,
+ bool makeSymmetric) {
+ // An output Operand to one Operation can be an input Operand to
+ // another Operation, so this function can be run more than once.
+ // We simply return if the Tensor for the Operand is already created.
+ if (mOperandToTensorIdx[operandIdx] != -1) return {};
+
+ const Operand& operand = mSubgraph->operands[operandIdx];
+
+ std::vector<float> scaleVector{operand.scale};
+ std::vector<int64_t> zeroPointVector{operand.zeroPoint};
+    // min and max are used to convert TFLite models to TF models, so they are unused in this
+    // case and can be set to 0
+ std::vector<float> minVector{0};
+ std::vector<float> maxVector{0};
+
+ // build quantization parameters
+ auto quantizationParams = tflite::CreateQuantizationParametersDirect(
+ *mBuilder, &minVector /* min */, &maxVector /* max */, &scaleVector /* scale */,
+ &zeroPointVector /* zero_point */,
+ tflite::QuantizationDetails::QuantizationDetails_NONE /* details_type */);
+
+ // add buffer if constant operand
+ // buffer at index 0 is reserved for tensors without a buffer
+ uint32_t bufferIdx = 0;
+ if (isOperandConstant(operand)) {
+ auto [data, dataLength] = getConstantPointerAndLength(operand);
+ if (makeSymmetric && operand.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+ std::vector<int8_t> dataVector(reinterpret_cast<const int8_t*>(data),
+ reinterpret_cast<const int8_t*>(data) + dataLength);
+ bool emitWarning = false;
+ for (uint32_t i = 0; i < dataLength; i++) {
+ int32_t newValue = static_cast<int32_t>(dataVector[i]) - operand.zeroPoint;
+ if (newValue < std::numeric_limits<int8_t>::min() ||
+ newValue > std::numeric_limits<int8_t>::max()) {
+ emitWarning = true;
+ }
+ dataVector[i] = static_cast<int8_t>(std::clamp(
+ newValue, static_cast<int32_t>(std::numeric_limits<int8_t>::min()),
+ static_cast<int32_t>(std::numeric_limits<int8_t>::max())));
+ }
+
+ if (emitWarning) {
+ LOG(WARNING) << "Asymmetric to symmetric conversion will result in "
+ "underflow/overflow. Clamping data";
+ }
+ bufferIdx = addBufferFromData(reinterpret_cast<const uint8_t*>(dataVector.data()),
+ dataLength);
+ } else {
+ bufferIdx = addBufferFromData(data, dataLength);
+ }
+ }
+
+ // shape of tensor
+ std::vector<int32_t> shape(operand.dimensions.begin(), operand.dimensions.end());
+ replaceZeroDimensions(&shape);
+
+ // build tensor
+ TensorFlatbuffer tensor = tflite::CreateTensorDirect(
+ *mBuilder, &shape, NN_TRY(getTensorFlatbufferOperandType(operand.type)) /* type */,
+ bufferIdx /* buffer */, 0 /* name */, quantizationParams /* quantization */);
+ addTensorFlatbuffer(tensor, operandIdx);
+
+ return {};
+}
+
+} // namespace nn
+} // namespace android \ No newline at end of file
diff --git a/runtime/operation_converters/SubGraphContext.h b/runtime/operation_converters/SubGraphContext.h
new file mode 100644
index 000000000..17d3d0eb9
--- /dev/null
+++ b/runtime/operation_converters/SubGraphContext.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_SUBGRAPH_CONTEXT_H
+#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_SUBGRAPH_CONTEXT_H
+
+#include <utility>
+#include <vector>
+
+#include "FlatbufferModelBuilderUtils.h"
+#include "NeuralNetworks.h"
+
+namespace android {
+namespace nn {
+
+// This keeps track of all the data needed to convert NNAPI subgraphs to TFLite subgraphs
+// This also provides information needed to convert NNAPI Operations to TFLite Operators
+// Once the subgraph is done building, call finish() to return the flatbuffer
+class SubGraphContext {
+ public:
+ SubGraphContext(const Model* model, const Model::Subgraph* subgraph,
+ flatbuffers::FlatBufferBuilder* builder,
+ std::vector<OperatorCodeFlatbuffer>* opCodesVector,
+ std::vector<int>* opCodeIndexForOperationType,
+ std::vector<BufferFlatbuffer>* bufferVector);
+
+ SubGraphFlatbuffer finish();
+
+ // If the operandIdx is -1, it suggests that the tensor being added doesn't have a
+ // corresponding Operand from the NNAPI NDK model.
+ // Returns index of Tensor being added.
+ int addTensorFlatbuffer(TensorFlatbuffer tensor, int32_t operandIdx = -1);
+ void addOperatorFlatbuffer(OperatorFlatbuffer opFlatbuffer);
+ void addSubGraphInput(int32_t operandIdx);
+ void addSubGraphOutput(int32_t operandIdx);
+
+ const Model::Subgraph* getSubgraph() const { return mSubgraph; }
+ // Returns -1 if there is no corresponding tensor index
+ int getTensorIdxFromOperandIdx(int operandIdx) const;
+ uint32_t addOpCode(OperationType operationType);
+ flatbuffers::FlatBufferBuilder& getBuilder() { return *mBuilder; }
+
+ // OperandLifeTime must be CONSTANT_COPY or CONSTANT_REFERENCE
+    // Will crash if OperandLifeTime is neither of the two.
+ // dataSize is the size of data in bytes.
+ template <typename Type>
+ void copyConstantValueToData(const Operand& operand, Type* data, size_t dataSize);
+ template <typename Type>
+ Type getConstantScalar(const Operand& operand);
+
+ // Returns Buffer index
+ uint32_t addBufferFromData(const uint8_t* data, uint32_t length);
+    // makeSymmetric turns asymmetric tensors to symmetric by setting data = data - zeroPoint
+ // makeSymmetric is supported only for constant OperandType::TENSOR_QUANT8_ASYMM_SIGNED
+ // If unsupported type is passed, makeSymmetric is ignored
+ Result<void> createTensorFlatbufferFromOperand(uint32_t operandIdx, bool makeSymmetric = false);
+
+ private:
+ const Mapping& getMapping(uint32_t poolIndex);
+ std::pair<const uint8_t*, uint32_t> getConstantPointerAndLength(const Operand& operand);
+
+ const Model* mModel;
+ const Model::Subgraph* mSubgraph;
+ flatbuffers::FlatBufferBuilder* mBuilder;
+
+ std::vector<OperatorCodeFlatbuffer>* mOpCodesVector;
+ std::vector<int>* mOpCodeIndexForOperationType;
+ std::vector<BufferFlatbuffer>* mBufferVector;
+
+ std::vector<OperatorFlatbuffer> mOperatorVector;
+ std::vector<TensorFlatbuffer> mTensorVector;
+ std::vector<int32_t> mInputTensors;
+ std::vector<int32_t> mOutputTensors;
+ std::vector<int> mOperandToTensorIdx;
+ // Each index corresponds to the pool index of shared memory
+ std::vector<Mapping> mMappings;
+};
+
+template <typename Type>
+void SubGraphContext::copyConstantValueToData(const Operand& operand, Type* data, size_t dataSize) {
+ auto [pointer, length] = getConstantPointerAndLength(operand);
+ CHECK_GE(dataSize, length);
+
+ std::memcpy(data, pointer, length);
+}
+
+template <typename Type>
+Type SubGraphContext::getConstantScalar(const Operand& operand) {
+ Type data;
+ copyConstantValueToData(operand, &data, sizeof(Type));
+ return data;
+}
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_OPERATION_CONVERTERS_SUBGRAPH_CONTEXT_H \ No newline at end of file
diff --git a/runtime/packageinfo/libneuralnetworks_packageinfo.map.txt b/runtime/packageinfo/libneuralnetworks_packageinfo.map.txt
index 803b34556..95308d31f 100644
--- a/runtime/packageinfo/libneuralnetworks_packageinfo.map.txt
+++ b/runtime/packageinfo/libneuralnetworks_packageinfo.map.txt
@@ -15,8 +15,8 @@
#
LIBNEURALNETWORKS_PACKAGE_INFO {
global:
- ANeuralNetworks_fetch_PackageInfo; # apex
- ANeuralNetworks_free_PackageInfo; # apex
+ ANeuralNetworks_fetch_PackageInfo; # systemapi
+ ANeuralNetworks_free_PackageInfo; # systemapi
local:
*;
};
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp
index 57addd92b..79044845d 100644
--- a/runtime/test/Android.bp
+++ b/runtime/test/Android.bp
@@ -201,6 +201,58 @@ cc_defaults {
],
}
+cc_defaults {
+ name: "NeuralNetworksTest_v2_static_defaults",
+ defaults: ["NeuralNetworksTest_static_defaults"],
+ srcs: [
+ "TestCompatibilityLayer.cpp",
+ ],
+ exclude_srcs: [
+ "PreparedModelCallback.cpp",
+ "TestCompilationCaching.cpp",
+ "TestCompliance.cpp",
+ "TestControlFlow.cpp",
+ "TestExecution.cpp",
+ "TestExtensions.cpp",
+ "TestFailingDriver.cpp",
+ "TestFree.cpp",
+ "TestGenerated.cpp",
+ "TestIntrospectionControl.cpp",
+ "TestMemory.cpp",
+ "TestMemoryDomain.cpp",
+ "TestMemoryInternal.cpp",
+ "TestOperandExtraParams.cpp",
+ "TestPartitioning.cpp",
+ "TestPartitioningRandom.cpp",
+ "TestRemoveDefaultArguments.cpp",
+ "TestServerFlag.cpp",
+ "TestTelemetry.cpp",
+ "TestTrivialModel.cpp",
+ "TestUnknownDimensions.cpp",
+ "TestUnspecifiedDimensions.cpp",
+ "TestUpdatability.cpp",
+ "TestValidateModel.cpp",
+ "TestValidateOperations.cpp",
+ "TestValidation.cpp",
+ "fibonacci_extension/FibonacciDriver.cpp",
+ "fibonacci_extension/FibonacciExtensionTest.cpp",
+ ],
+
+ include_dirs: [
+ "external/flatbuffers/include",
+ "external/tensorflow",
+ ],
+
+ static_libs: [
+ "libflatbuffers-cpp",
+ "libneuralnetworks_v2_static_experimental",
+ "libtflite_static",
+ ],
+ exclude_static_libs: [
+ "libneuralnetworks_static",
+ ],
+}
+
cc_test {
name: "NeuralNetworksTest_static",
defaults: ["NeuralNetworksTest_static_defaults"],
@@ -238,6 +290,41 @@ cc_test {
},
}
+cc_test {
+ name: "NeuralNetworksTest_v2_static",
+ defaults: ["NeuralNetworksTest_v2_static_defaults"],
+ test_suites: [
+ "general-tests",
+ ],
+ target: {
+ android: {
+ test_config: "AndroidTest_NeuralNetworksTest_v2_static.xml",
+ srcs: ["TestStatsdTelemetry.cpp"],
+ },
+ host: {
+ cflags: [
+ "-D__ANDROID_API__=10000",
+ ],
+ },
+ },
+ whole_static_libs: [
+ "neuralnetworks_generated_experimental_example",
+ ],
+ exclude_static_libs: [
+ "libneuralnetworks_common",
+ "neuralnetworks_types",
+ "server_configurable_flags",
+ ],
+ static_libs: [
+ "libneuralnetworks_common_experimental",
+ "neuralnetworks_types_experimental",
+ ],
+ cflags: ["-DNN_EXPERIMENTAL_FEATURE"],
+ test_options: {
+ unit_test: false,
+ },
+}
+
tidy_disabled_operation_signatures_files = [
// These took too much time with clang-tidy.
"fuzzing/operation_signatures/Convolutions.cpp",
@@ -417,7 +504,6 @@ cc_test {
cc_library_static {
name: "CtsNNAPITests_static",
host_supported: true,
- defaults: ["neuralnetworks_float16"],
srcs: [
":libneuralnetworks_generated_test_harness_for_cts",
"CtsMain.cpp",
@@ -506,7 +592,6 @@ cc_library_static {
cc_defaults {
name: "neuralnetworks_generated_defaults",
- defaults: ["neuralnetworks_float16"],
tidy: false, // generated files are too big to run with clang-tidy
host_supported: true,
vendor_available: true,
@@ -552,7 +637,6 @@ cc_library_static {
cc_library_static {
name: "neuralnetworks_generated_V1_3_cts_only_example",
host_supported: true,
- defaults: ["neuralnetworks_float16"],
tidy: false, // generated files are too big to run with clang-tidy
srcs: ["generated/spec_V1_3_cts_only/*.example.cpp"],
static_libs: ["libneuralnetworks_generated_test_harness"],
@@ -568,7 +652,6 @@ cc_library_static {
cc_library_static {
name: "NeuralNetworksTest_random_graph",
host_supported: true,
- defaults: ["neuralnetworks_float16"],
srcs: [
":libneuralnetworks_generated_test_harness_for_cts",
"GeneratedTestUtils.cpp",
diff --git a/runtime/test/AndroidTest_NeuralNetworksTest_v2_static.xml b/runtime/test/AndroidTest_NeuralNetworksTest_v2_static.xml
new file mode 100644
index 000000000..d0ca05745
--- /dev/null
+++ b/runtime/test/AndroidTest_NeuralNetworksTest_v2_static.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2022 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs NeuralNetworksTest_v2_static.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ <option name="force-root" value="false" />
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="NeuralNetworksTest_v2_static->/data/local/tmp/NeuralNetworksTest_v2_static" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="NeuralNetworksTest_v2_static" />
+ <option name="native-test-timeout" value="3h" />
+ </test>
+</configuration>
diff --git a/runtime/test/SupportLibraryTestGenerated.cpp b/runtime/test/SupportLibraryTestGenerated.cpp
index ceff04a2d..609987f62 100644
--- a/runtime/test/SupportLibraryTestGenerated.cpp
+++ b/runtime/test/SupportLibraryTestGenerated.cpp
@@ -61,6 +61,11 @@ namespace android::nn::generated_tests {
using namespace sl_wrapper;
using namespace test_helper;
+enum ComputeWithDeviceMemoriesResult {
+ SKIP,
+ OK,
+};
+
class GeneratedTests : public GeneratedTestBase {
protected:
void SetUp() override;
@@ -72,9 +77,9 @@ class GeneratedTests : public GeneratedTestBase {
uint32_t index);
ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation,
uint32_t index);
- void computeWithDeviceMemories(const Compilation& compilation, const TestModel& testModel,
- Execution* execution, Execution::ComputeMode computeMode,
- Result* result, std::vector<TestBuffer>* outputs);
+ ComputeWithDeviceMemoriesResult computeWithDeviceMemories(
+ const Compilation& compilation, const TestModel& testModel, Execution* execution,
+ Execution::ComputeMode computeMode, Result* result, std::vector<TestBuffer>* outputs);
bool checkSupported(const Model& model, ANeuralNetworksDevice* device);
std::optional<Compilation> compileModel(const Model& model, ANeuralNetworksDevice* device);
void executeWithCompilation(const Compilation& compilation, const TestModel& testModel);
@@ -284,13 +289,12 @@ ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForOutput(const Compila
}
// Set result = Result::NO_ERROR and outputs = {} if the test should be skipped.
-void GeneratedTests::computeWithDeviceMemories(const Compilation& compilation,
- const TestModel& testModel, Execution* execution,
- Execution::ComputeMode computeMode, Result* result,
- std::vector<TestBuffer>* outputs) {
- ASSERT_NE(execution, nullptr);
- ASSERT_NE(result, nullptr);
- ASSERT_NE(outputs, nullptr);
+ComputeWithDeviceMemoriesResult GeneratedTests::computeWithDeviceMemories(
+ const Compilation& compilation, const TestModel& testModel, Execution* execution,
+ Execution::ComputeMode computeMode, Result* result, std::vector<TestBuffer>* outputs) {
+ EXPECT_NE(execution, nullptr);
+ EXPECT_NE(result, nullptr);
+ EXPECT_NE(outputs, nullptr);
outputs->clear();
std::vector<Memory> inputMemories, outputMemories;
@@ -302,30 +306,34 @@ void GeneratedTests::computeWithDeviceMemories(const Compilation& compilation,
const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
// Omitted input.
if (operand.data.size() == 0) {
- ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
+ EXPECT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
continue;
}
// Create device memory.
ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i);
- ASSERT_NE(memory, nullptr);
+ if (memory == nullptr) {
+ return ComputeWithDeviceMemoriesResult::SKIP;
+ }
auto& wrapperMemory = inputMemories.emplace_back(Memory(mNnApi.get(), memory));
// Copy data from TestBuffer to device memory.
auto ashmem = TestAshmem::createFrom(mNnApi.get(), operand.data);
- ASSERT_NE(ashmem, nullptr);
- ASSERT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
+ EXPECT_NE(ashmem, nullptr);
+ EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
ANEURALNETWORKS_NO_ERROR);
- ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
+ EXPECT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
}
// Model outputs.
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
SCOPED_TRACE("Output index: " + std::to_string(i));
ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
- ASSERT_NE(memory, nullptr);
+ if (memory == nullptr) {
+ return ComputeWithDeviceMemoriesResult::SKIP;
+ }
auto& wrapperMemory = outputMemories.emplace_back(Memory(mNnApi.get(), memory));
- ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
+ EXPECT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
}
}
@@ -339,13 +347,14 @@ void GeneratedTests::computeWithDeviceMemories(const Compilation& compilation,
auto& output = outputs->emplace_back(bufferSize);
auto ashmem = TestAshmem::createFrom(mNnApi.get(), output);
- ASSERT_NE(ashmem, nullptr);
- ASSERT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(outputMemories[i].get(),
+ EXPECT_NE(ashmem, nullptr);
+ EXPECT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(outputMemories[i].get(),
ashmem->get()->get()),
ANEURALNETWORKS_NO_ERROR);
std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize,
output.getMutable<uint8_t>());
}
+ return ComputeWithDeviceMemoriesResult::OK;
}
void GeneratedTests::executeWithCompilation(const Compilation& compilation,
@@ -357,8 +366,11 @@ void GeneratedTests::executeWithCompilation(const Compilation& compilation,
std::vector<TestBuffer> outputs;
if (mTestDeviceMemory) {
- computeWithDeviceMemories(compilation, testModel, &execution, mComputeMode, &result,
- &outputs);
+ if (computeWithDeviceMemories(compilation, testModel, &execution, mComputeMode, &result,
+ &outputs) == ComputeWithDeviceMemoriesResult::SKIP) {
+ std::cout << "\nModel not supported by device memories. Skipping" << std::endl;
+ return;
+ }
} else {
computeWithPtrs(testModel, &execution, mComputeMode, &result, &outputs);
}
diff --git a/runtime/test/TestCompatibilityLayer.cpp b/runtime/test/TestCompatibilityLayer.cpp
new file mode 100644
index 000000000..99674a649
--- /dev/null
+++ b/runtime/test/TestCompatibilityLayer.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <ftw.h>
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <thread>
+#include <utility>
+#include <vector>
+
+#include "AndroidVersionUtil.h"
+#include "GeneratedTestUtils.h"
+#include "NeuralNetworks.h"
+#include "NeuralNetworksTypes.h"
+#include "TestHarness.h"
+#include "TestNeuralNetworksWrapper.h"
+#include "TestUtils.h"
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#pragma clang diagnostic pop
+
+#ifdef NNTEST_CTS
+#define NNTEST_COMPUTE_MODE
+#endif
+
+namespace android::nn::generated_tests {
+using namespace test_wrapper;
+using namespace test_helper;
+
+class CompatibilityLayerGeneratedTests : public GeneratedTestBase {
+ protected:
+ void SetUp() override;
+ void TearDown() override;
+
+ // Test driver for those generated from packages/modules/NeuralNetworks/runtime/test/specs
+ void execute(const TestModel& testModel);
+
+ bool mTestDynamicOutputShape = false;
+ bool mTestSupported = true;
+};
+
+class CompatibilityLayerGeneratedTestsSupported : public CompatibilityLayerGeneratedTests {};
+class CompatibilityLayerGeneratedTestsUnsupported : public CompatibilityLayerGeneratedTests {};
+class CompatibilityLayerGeneratedTestsDynamicOutput : public CompatibilityLayerGeneratedTests {};
+
+void CompatibilityLayerGeneratedTests::execute(const TestModel& testModel) {
+ GeneratedModel model;
+ createModel(testModel, mTestDynamicOutputShape, &model);
+ if (testModel.expectFailure && !model.isValid()) {
+ return;
+ }
+ ASSERT_EQ(model.finish(), Result::NO_ERROR);
+ ASSERT_TRUE(model.isValid());
+
+ Compilation compilation(&model);
+ Result result = compilation.finish();
+ if (!mTestSupported && result != Result::NO_ERROR) return;
+ ASSERT_EQ(result, Result::NO_ERROR);
+
+ Execution execution(&compilation);
+
+ // Model inputs.
+ for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
+ const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
+ ASSERT_EQ(Result::NO_ERROR,
+ execution.setInput(i, operand.data.get<void>(), operand.data.size()));
+ }
+
+ // Model outputs.
+ std::vector<TestBuffer> outputs;
+ for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
+ const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
+ const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);
+ outputs.emplace_back(bufferSize);
+
+ ASSERT_EQ(Result::NO_ERROR,
+ execution.setOutput(i, outputs.back().getMutable<void>(), bufferSize));
+ }
+
+ result = execution.compute(Execution::ComputeMode::SYNC);
+ ASSERT_EQ(result, Result::NO_ERROR);
+
+ // If a conv filter under/overflows, "compatibleTest" will report
+ // unsupported, but the actual conversion will result in NO_ERROR because
+ // it is treated as a warning, rather than an error. Because of the accuracy
+ // loss, we should not check test results in such a case.
+ //
+ // TODO(b/237410741): A potentially better approach is to have
+ // "compatibleTest" report three status: fully supported, supported with
+ // accuracy loss, and not supported.
+ if (mTestSupported) {
+ checkResults(testModel, outputs);
+ }
+}
+
+void CompatibilityLayerGeneratedTests::SetUp() {
+ GeneratedTestBase::SetUp();
+}
+
+void CompatibilityLayerGeneratedTests::TearDown() {
+ GeneratedTestBase::TearDown();
+}
+
+namespace {
+
+bool compatibleTest(const TestModel& testModel) {
+ static const std::vector<TestOperationType> kSupportedOperationTypes{
+ TestOperationType::CONV_2D, TestOperationType::ADD,
+ TestOperationType::DEPTHWISE_CONV_2D, TestOperationType::LOGISTIC};
+ static const std::vector<TestOperandType> kSupportedOperandTypes{
+ TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::BOOL,
+ TestOperandType::INT32};
+
+ if (testModel.hasControlFlow()) {
+ return false;
+ }
+
+ bool result = true;
+ const TestSubgraph& mainSubgraph = testModel.main;
+
+ result &= std::all_of(
+ mainSubgraph.operations.begin(), mainSubgraph.operations.end(),
+ [&mainSubgraph](const TestOperation& operation) {
+ bool isOperationCompatible = true;
+ // ensure that tensors are nhwc and filter is constant
+ if (operation.type == TestOperationType::CONV_2D ||
+ operation.type == TestOperationType::DEPTHWISE_CONV_2D) {
+ size_t implicitIsNchwIdx =
+ (operation.type == TestOperationType::CONV_2D) ? 7 : 8;
+ size_t explicitIsNchwIdx = implicitIsNchwIdx + 3;
+ bool isImplicitPadding =
+ operation.inputs.size() <= implicitIsNchwIdx ||
+ mainSubgraph.operands[operation.inputs[implicitIsNchwIdx]].type ==
+ TestOperandType::BOOL;
+ size_t isNchwIdx = isImplicitPadding ? implicitIsNchwIdx : explicitIsNchwIdx;
+
+ if (operation.inputs.size() > static_cast<uint32_t>(isNchwIdx)) {
+ isOperationCompatible &=
+ !(*mainSubgraph.operands[operation.inputs[isNchwIdx]]
+ .data.get<bool>());
+ }
+
+ const int kFilterIdx = 1;
+ const TestOperand& filterOperand =
+ mainSubgraph.operands[operation.inputs[kFilterIdx]];
+ TestOperandLifeTime filterLifetime = filterOperand.lifetime;
+ isOperationCompatible &=
+ (filterLifetime == TestOperandLifeTime::CONSTANT_COPY) ||
+ (filterLifetime == TestOperandLifeTime::CONSTANT_REFERENCE);
+
+ // check that making filter operands symmetrical does not over/underflow
+ // this is because the outputs of the model will be different from expected if
+ // the operand value changes with the under/overflow
+ if (filterOperand.type == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+ const int8_t* data = filterOperand.data.get<int8_t>();
+ size_t dataSize = filterOperand.data.size();
+
+ for (int32_t i = 0; i < static_cast<int32_t>(dataSize); i++) {
+ int32_t newValue =
+ static_cast<int32_t>(data[i]) - filterOperand.zeroPoint;
+ if (newValue < std::numeric_limits<int8_t>::min() ||
+ newValue > std::numeric_limits<int8_t>::max()) {
+ isOperationCompatible = false;
+ break;
+ }
+ }
+ }
+ }
+
+ isOperationCompatible &=
+ std::find(kSupportedOperationTypes.begin(), kSupportedOperationTypes.end(),
+ operation.type) != kSupportedOperationTypes.end();
+
+ return isOperationCompatible;
+ });
+
+ result &= std::all_of(mainSubgraph.operands.begin(), mainSubgraph.operands.end(),
+ [](const TestOperand& operand) {
+ return std::find(kSupportedOperandTypes.begin(),
+ kSupportedOperandTypes.end(),
+ operand.type) != kSupportedOperandTypes.end();
+ });
+
+ return result;
+}
+
+} // namespace
+
+TEST_P(CompatibilityLayerGeneratedTestsSupported, CompatibilityLayerSupported) {
+ mTestSupported = true;
+ execute(testModel);
+}
+
+TEST_P(CompatibilityLayerGeneratedTestsUnsupported, CompatibilityLayerUnsupported) {
+ mTestSupported = false;
+ execute(testModel);
+}
+
+TEST_P(CompatibilityLayerGeneratedTestsDynamicOutput, CompatibilityLayerDynamicOutput) {
+ mTestDynamicOutputShape = true;
+ mTestSupported = false;
+ execute(testModel);
+}
+
+INSTANTIATE_GENERATED_TEST(CompatibilityLayerGeneratedTestsSupported,
+ [](const TestModel& testModel) {
+ return !testModel.expectFailure && compatibleTest(testModel);
+ });
+
+INSTANTIATE_GENERATED_TEST(CompatibilityLayerGeneratedTestsUnsupported,
+ [](const TestModel& testModel) {
+ return !testModel.expectFailure && !compatibleTest(testModel);
+ });
+
+INSTANTIATE_GENERATED_TEST(CompatibilityLayerGeneratedTestsDynamicOutput,
+ [](const TestModel& testModel) {
+ return !testModel.expectFailure && !testModel.hasScalarOutputs();
+ });
+
+} // namespace android::nn::generated_tests
diff --git a/runtime/test/android_fuzzing/Android.bp b/runtime/test/android_fuzzing/Android.bp
index 0b0f9e845..1e86cda2d 100644
--- a/runtime/test/android_fuzzing/Android.bp
+++ b/runtime/test/android_fuzzing/Android.bp
@@ -25,6 +25,7 @@ package {
cc_library_static {
name: "libneuralnetworks_fuzzer_proto",
host_supported: true,
+ vendor_available: true,
owner: "google",
srcs: ["Model.proto"],
proto: {
@@ -37,6 +38,7 @@ cc_library_static {
cc_library_static {
name: "libneuralnetworks_fuzzer_harness",
host_supported: true,
+ vendor_available: true,
owner: "google",
srcs: [
"Converter.cpp",
diff --git a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index 39a3c77e4..c1de329f5 100644
--- a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -288,16 +288,35 @@ inline std::enable_if_t<nnIsFloat<T>, T> getUniformNonZero(T lower, T upper, T z
// getUniform for integers operates on a closed interval [lower, upper].
// This is important that 255 should be included as a valid candidate for QUANT8_ASYMM values.
+//
+// This template only accepts int8_t, uint8_t, bool, or the types accepted by stdlib's
+// uniform_int_distribution. `char` is not an officially supported type, but may be
+// supported depending on library implementation.
template <typename T>
inline std::enable_if_t<std::is_integral_v<T>, T> getUniform(T lower, T upper) {
- std::uniform_int_distribution<T> dis(lower, upper);
- return dis(RandomNumberGenerator::generator);
+ // uniform_int_distribution is only defined by the stdlib standard
+ // when T is of types short, int, long, long long,
+ // unsigned short, unsigned int, unsigned long, or unsigned long long.
+ //
+ // However, existing code relies on getUniform working for some smaller types,
+ // so we special case them here.
+ if constexpr (std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t> ||
+ std::is_same_v<T, bool>) {
+ // We can get away with using bool here because lower and upper are 0 or 1.
+ // The uint8_t case can always be upsized to int16_t and then downsized because
+ // we'll never generate a random number lower than the minimum of 0 when T
+ // is unsigned.
+ std::uniform_int_distribution<int16_t> dis(lower, upper);
+ return static_cast<T>(dis(RandomNumberGenerator::generator));
+ } else {
+ std::uniform_int_distribution<T> dis(lower, upper);
+ return dis(RandomNumberGenerator::generator);
+ }
}
template <typename T>
inline std::enable_if_t<std::is_integral_v<T>, T> getUniformNonZero(T lower, T upper, T zeroPoint) {
if (upper >= zeroPoint) upper--;
- std::uniform_int_distribution<T> dis(lower, upper);
- const T value = dis(RandomNumberGenerator::generator);
+ const T value = getUniform(lower, upper);
return value >= zeroPoint ? value + 1 : value;
}
diff --git a/shim_and_sl/ShimConverter.cpp b/shim_and_sl/ShimConverter.cpp
index 2cbdc092d..ed3cda236 100644
--- a/shim_and_sl/ShimConverter.cpp
+++ b/shim_and_sl/ShimConverter.cpp
@@ -51,6 +51,10 @@ ANeuralNetworksModel* convertSubgraphFromHAL(
size_t subgraphIndex, const std::vector<uint8_t>& copiedOperandValues,
ErrorStatus* errorStatus) {
*errorStatus = ErrorStatus::NONE;
+ if (allModels == nullptr || subgraphIndex >= (*allModels).size()) {
+ *errorStatus = ErrorStatus::INVALID_ARGUMENT;
+ return nullptr;
+ }
if ((*allModels)[subgraphIndex].has_value()) {
return (*allModels)[subgraphIndex]->getHandle();
}
@@ -128,6 +132,12 @@ ANeuralNetworksModel* convertSubgraphFromHAL(
switch (operand.lifetime) {
case OperandLifeTime::CONSTANT_COPY: {
+ if (operand.location.length + operand.location.offset >
+ model.operandValues.size()) {
+ *errorStatus = ErrorStatus::INVALID_ARGUMENT;
+ return nullptr;
+ }
+
if (operand.location.length <=
ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES) {
resultModel.setOperandValue(
diff --git a/shim_and_sl/ShimPreparedModel.cpp b/shim_and_sl/ShimPreparedModel.cpp
index 178cc1c33..840d65a19 100644
--- a/shim_and_sl/ShimPreparedModel.cpp
+++ b/shim_and_sl/ShimPreparedModel.cpp
@@ -85,6 +85,11 @@ ErrorStatus ShimPreparedModel::parseInputs(
}
const auto& model = mMainAndReferencedModels[0];
+
+ if (request.inputs.size() > model.getInputs().size()) {
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
+
// set inputs
for (int i = 0; i < request.inputs.size(); ++i) {
const auto& input = request.inputs[i];
@@ -107,6 +112,9 @@ ErrorStatus ShimPreparedModel::parseInputs(
}
}
+ if (request.outputs.size() > model.getOutputs().size()) {
+ return ErrorStatus::INVALID_ARGUMENT;
+ }
// set outputs
for (int i = 0; i < request.outputs.size(); ++i) {
const auto& output = request.outputs[i];
diff --git a/tools/nnapi_info/Android.bp b/tools/nnapi_info/Android.bp
new file mode 100644
index 000000000..048615f5e
--- /dev/null
+++ b/tools/nnapi_info/Android.bp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ // Inherits all licenses from parent to get Apache 2.0 and package name
+ default_applicable_licenses: [
+ "packages_modules_NeuralNetworks_license",
+ ],
+}
+
+cc_binary {
+ name: "nnapi_info",
+ srcs: [
+ "nnapi_info.cpp",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "liblog",
+ "libnativewindow",
+ "libneuralnetworks",
+ "libutils",
+ ],
+}
diff --git a/tools/nnapi_info/nnapi_info.cpp b/tools/nnapi_info/nnapi_info.cpp
new file mode 100644
index 000000000..e3170bbfb
--- /dev/null
+++ b/tools/nnapi_info/nnapi_info.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "NnapiInfo"
+
+#define CONTINUE_IF_ERR(expr) \
+ { \
+ int _errCode = (expr); \
+ if (_errCode != ANEURALNETWORKS_NO_ERROR) { \
+ std::cerr << #expr << " failed at " << __FILE__ << ":" << __LINE__ << std::endl; \
+ continue; \
+ } \
+ }
+
+#include <iostream>
+#include <string>
+
+#include "NeuralNetworks.h"
+#include "NeuralNetworksTypes.h"
+
+namespace {
+std::string featureLevelString(int64_t featureLevel) {
+ switch (featureLevel) {
+ case ANEURALNETWORKS_FEATURE_LEVEL_1:
+ return "Level 1";
+ case ANEURALNETWORKS_FEATURE_LEVEL_2:
+ return "Level 2";
+ case ANEURALNETWORKS_FEATURE_LEVEL_3:
+ return "Level 3";
+ case ANEURALNETWORKS_FEATURE_LEVEL_4:
+ return "Level 4";
+ case ANEURALNETWORKS_FEATURE_LEVEL_5:
+ return "Level 5";
+ case ANEURALNETWORKS_FEATURE_LEVEL_6:
+ return "Level 6";
+ case ANEURALNETWORKS_FEATURE_LEVEL_7:
+ return "Level 7";
+ case ANEURALNETWORKS_FEATURE_LEVEL_8:
+ return "Level 8";
+ default:
+ return "Undefined feature level code";
+ }
+}
+
+std::string deviceTypeString(int32_t type) {
+ switch (type) {
+ case ANEURALNETWORKS_DEVICE_ACCELERATOR:
+ return "Accelerator";
+ case ANEURALNETWORKS_DEVICE_CPU:
+ return "CPU";
+ case ANEURALNETWORKS_DEVICE_GPU:
+ return "GPU";
+ case ANEURALNETWORKS_DEVICE_OTHER:
+ return "Other";
+ case ANEURALNETWORKS_DEVICE_UNKNOWN:
+ default:
+ return "Unknown";
+ }
+}
+} // namespace
+
+int main() {
+ uint32_t numDevices;
+ int returnCode = ANeuralNetworks_getDeviceCount(&numDevices);
+ if (returnCode != ANEURALNETWORKS_NO_ERROR) {
+ std::cerr << "Error obtaining device count" << std::endl;
+ return 1;
+ }
+
+ std::cout << "Number of devices: " << numDevices << std::endl << std::endl;
+
+ ANeuralNetworksDevice* device = nullptr;
+ int64_t featureLevel;
+ const char* name;
+ int32_t type;
+ const char* version;
+ for (uint32_t i = 0; i < numDevices; i++) {
+ CONTINUE_IF_ERR(ANeuralNetworks_getDevice(i, &device));
+ CONTINUE_IF_ERR(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel));
+ CONTINUE_IF_ERR(ANeuralNetworksDevice_getName(device, &name));
+ CONTINUE_IF_ERR(ANeuralNetworksDevice_getType(device, &type));
+ CONTINUE_IF_ERR(ANeuralNetworksDevice_getVersion(device, &version));
+
+ std::cout << "Device: " << name << std::endl;
+ std::cout << "Feature Level: " << featureLevelString(featureLevel) << std::endl;
+ std::cout << "Type: " << deviceTypeString(type) << std::endl;
+ std::cout << "Version: " << version << std::endl;
+
+ std::cout << std::endl;
+ }
+
+ return 0;
+} \ No newline at end of file
diff --git a/tools/test_generator/test_harness/include/TestHarness.h b/tools/test_generator/test_harness/include/TestHarness.h
index 743af80ae..d702c2a7f 100644
--- a/tools/test_generator/test_harness/include/TestHarness.h
+++ b/tools/test_generator/test_harness/include/TestHarness.h
@@ -366,6 +366,8 @@ struct TestModel {
return newTestModel;
}
+ bool hasControlFlow() const { return !referenced.empty(); }
+
bool hasQuant8CoupledOperands() const {
bool result = false;
forEachSubgraph([&result](const TestSubgraph& subgraph) {