summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTreehugger Robot <treehugger-gerrit@google.com>2020-11-20 16:13:34 +0000
committerGerrit Code Review <noreply-gerritcodereview@google.com>2020-11-20 16:13:34 +0000
commit4d7321d59ec45e7e8e3f216fe3a88d803b990258 (patch)
tree393400e347cac5b974dddc0b93060aef51e2317e
parented588c87fc323e9bb89ae00cbdd39ff206da852c (diff)
parent083da328b8899ce08f63fc564db444669f62b324 (diff)
downloadml-4d7321d59ec45e7e8e3f216fe3a88d803b990258.tar.gz
Merge changes from topic "nn-shared-handle"
* changes:
  Replace native_handle with unique_fd
  Split Utils into LegacyUtils and LegacyHalUtils
  Copy Utils to LegacyUtils and LegacyHalUtils
  Use sensible Capabilities in test drivers
-rw-r--r--nn/common/Android.bp6
-rw-r--r--nn/common/LegacyHalUtils.cpp1739
-rw-r--r--nn/common/LegacyUtils.cpp (renamed from nn/common/Utils.cpp)1700
-rw-r--r--nn/common/SharedMemoryAndroid.cpp124
-rw-r--r--nn/common/SharedMemoryHost.cpp57
-rw-r--r--nn/common/TypeUtils.cpp2
-rw-r--r--nn/common/Types.cpp32
-rw-r--r--nn/common/Validation.cpp14
-rw-r--r--nn/common/include/LegacyHalUtils.h389
-rw-r--r--nn/common/include/LegacyUtils.h313
-rw-r--r--nn/common/include/Utils.h592
-rw-r--r--nn/common/include/nnapi/IDevice.h8
-rw-r--r--nn/common/include/nnapi/TypeUtils.h2
-rw-r--r--nn/common/include/nnapi/Types.h23
-rw-r--r--nn/common/include/nnapi/Validation.h4
-rw-r--r--nn/runtime/Manager.cpp34
-rw-r--r--nn/runtime/VersionedInterfaces.cpp67
-rw-r--r--nn/runtime/VersionedInterfaces.h14
-rw-r--r--nn/runtime/test/HalUtils.h37
-rw-r--r--nn/runtime/test/TestExtensions.cpp3
-rw-r--r--nn/runtime/test/TestFailingDriver.cpp6
-rw-r--r--nn/runtime/test/TestPartitioning.cpp15
-rw-r--r--nn/runtime/test/TestRemoveDefaultArguments.cpp3
-rw-r--r--nn/runtime/test/TestVersionedInterfaces.cpp24
24 files changed, 2731 insertions, 2477 deletions
diff --git a/nn/common/Android.bp b/nn/common/Android.bp
index c6000438f..2fdfd5e45 100644
--- a/nn/common/Android.bp
+++ b/nn/common/Android.bp
@@ -86,8 +86,9 @@ cc_library_static {
srcs: [
"ExecutionBurstController.cpp",
"ExecutionBurstServer.cpp",
+ "LegacyHalUtils.cpp",
+ "LegacyUtils.cpp",
"MemoryUtils.cpp",
- "Utils.cpp",
],
header_libs: [
"gemmlowp_headers",
@@ -156,12 +157,13 @@ cc_library_static {
"ExecutionBurstServer.cpp",
"GraphDump.cpp",
"IndexedShapeWrapper.cpp",
+ "LegacyHalUtils.cpp",
+ "LegacyUtils.cpp",
"MemoryUtils.cpp",
"MetaModel.cpp",
"OperationsUtils.cpp",
"QuantUtils.cpp",
"TokenHasher.cpp",
- "Utils.cpp",
"ValidateHal.cpp",
"operations/ArgMinMax.cpp",
"operations/BidirectionalSequenceLSTM.cpp",
diff --git a/nn/common/LegacyHalUtils.cpp b/nn/common/LegacyHalUtils.cpp
new file mode 100644
index 000000000..d92c6ea0f
--- /dev/null
+++ b/nn/common/LegacyHalUtils.cpp
@@ -0,0 +1,1739 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Utils"
+
+#include "LegacyHalUtils.h"
+
+#include <nnapi/TypeUtils.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.3/Conversions.h>
+
+#include <algorithm>
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "NeuralNetworks.h"
+#include "ValidateHal.h"
+
+namespace android {
+namespace nn {
+
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+
// Returns the largest nanoseconds-since-epoch value representable by the
// steady_clock-based deadline time point; used to saturate incoming deadlines.
static uint64_t getMaxNanosecondsSinceEpoch() {
    using MaxTimePoint =
            std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds>;
    return MaxTimePoint::max().time_since_epoch().count();
}
+
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint) {
+ using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator;
+ if (timePoint.getDiscriminator() == Discriminator::none) {
+ return std::nullopt;
+ }
+ const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch();
+ const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch();
+
+ // Clamp time point to max.
+ if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) {
+ return Deadline::max();
+ }
+
+ // Return provided time point.
+ return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}};
+}
+
+bool isExtensionOperandType(V1_3::OperandType type) {
+ return isExtensionOperandType(static_cast<OperandType>(type));
+}
+
+bool isExtensionOperationType(V1_3::OperationType type) {
+ return isExtensionOperationType(static_cast<OperationType>(type));
+}
+
+std::string getOperandTypeName(V1_3::OperandType type) {
+ return toString(type);
+}
+
+std::string getOperationName(V1_3::OperationType type) {
+ return toString(type);
+}
+
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions);
+}
+
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions);
+}
+
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
+ dimensions.size());
+}
+
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) {
+ return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
+ operand.dimensions.size());
+}
+
+void logModelToInfo(const V1_0::Model& model) {
+ LOG(INFO) << "V1_0::Model start";
+ LOG(INFO) << "operands" << toString(model.operands);
+ LOG(INFO) << "operations" << toString(model.operations);
+ LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+ LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+ LOG(INFO) << "operandValues size" << model.operandValues.size();
+ LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+}
+
+void logModelToInfo(const V1_1::Model& model) {
+ LOG(INFO) << "V1_1::Model start";
+ LOG(INFO) << "operands" << toString(model.operands);
+ LOG(INFO) << "operations" << toString(model.operations);
+ LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+ LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+ LOG(INFO) << "operandValues size " << model.operandValues.size();
+ LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+}
+
+void logModelToInfo(const V1_2::Model& model) {
+ LOG(INFO) << "V1_2::Model start";
+ LOG(INFO) << "operands" << toString(model.operands);
+ LOG(INFO) << "operations" << toString(model.operations);
+ LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+ LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+ LOG(INFO) << "operandValues size" << model.operandValues.size();
+ LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+ LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
+ LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
+}
+
+static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) {
+ LOG(INFO) << label << ".operands" << toString(subgraph.operands);
+ LOG(INFO) << label << ".operations" << toString(subgraph.operations);
+ LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes);
+ LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes);
+}
+
+void logModelToInfo(const V1_3::Model& model) {
+ LOG(INFO) << "V1_3::Model start";
+ logSubgraphToInfo("main", model.main);
+ for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) {
+ logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]);
+ }
+ LOG(INFO) << "operandValues size " << model.operandValues.size();
+ LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+ LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
+ LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
+}
+
+bool validateOperandSymmPerChannelQuantParams(
+ const V1_3::Operand& halOperand,
+ const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) {
+ if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+ return false;
+ }
+
+ NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
+ NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
+ NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
+ NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
+ << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
+ for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
+ NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
+ }
+ return true;
+}
+
+static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
+ HalVersion minSupportedHalVersion) {
+ if (halVersion < minSupportedHalVersion) {
+ LOG(ERROR) << "The given inputs and outputs for operation " << opType
+ << " are only supported in " << minSupportedHalVersion
+ << " and later (validating using " << halVersion << ")";
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
+ const uint32_t* inputIndexes, uint32_t outputCount,
+ const uint32_t* outputIndexes,
+ const std::vector<Operand>& operands, HalVersion halVersion) {
+ if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
+ NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+ LOG(ERROR) << "This validateOperation() overload does not support control flow";
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands,
+ halVersion, {});
+}
+
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
+ return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
+}
+
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
+ return convertErrorStatusToResultCode(uncheckedConvert(status));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
+}
+
+// Capabilities::operandPerformance utilities.
+// The field Capabilities::operandPerformance is a vector sorted by the field
+// Capabilities::OperandPerformance::type.
+
+template <HalVersion version>
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ V1_0::PerformanceInfo perf) {
+ using OpPerf = VersionedOperandPerformance<version>;
+
+ // Note: range presents enumerators in declaration order, not in numerical order.
+ static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
+
+ std::vector<OpPerf> ret;
+ ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
+ for (VersionedOperandType<version> type : kOperandTypeRange) {
+ if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
+ ret.push_back(OpPerf{type, perf});
+ }
+ }
+ std::sort(ret.begin(), ret.end(),
+ [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
+
+ return ret;
+}
+
+template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
+template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
+
+template <HalVersion version>
+void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
+ VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
+ CHECK(operandPerformance != nullptr);
+ const auto it =
+ std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
+ [](const VersionedOperandPerformance<version>& perf,
+ VersionedOperandType<version> type) { return perf.type < type; });
+ CHECK(it != operandPerformance->end())
+ << toString(type) << " not in " << toString(*operandPerformance);
+ it->info = perf;
+}
+
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, V1_0::PerformanceInfo perf) {
+ update<HalVersion::V1_2>(operandPerformance, type, perf);
+}
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, V1_0::PerformanceInfo perf) {
+ update<HalVersion::V1_3>(operandPerformance, type, perf);
+}
+
+template <HalVersion version>
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
+ VersionedOperandType<version> type) {
+ const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
+ [](const VersionedOperandPerformance<version>& perf,
+ VersionedOperandType<version> type) {
+ return static_cast<V1_3::OperandType>(perf.type) <
+ static_cast<V1_3::OperandType>(type);
+ });
+ if (it == operandPerformance.end()) {
+ LOG(WARNING) << "No PerformanceInfo for " << toString(type);
+ return kNoPerformanceInfo;
+ } else {
+ return it->info;
+ }
+}
+
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type) {
+ return lookup<HalVersion::V1_2>(operandPerformance, type);
+}
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type) {
+ CHECK(type != V1_3::OperandType::SUBGRAPH)
+ << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
+ return lookup<HalVersion::V1_3>(operandPerformance, type);
+}
+
+// Versioning
+
+// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
+// This array must be in sorted order.
+static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
+ V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
+ V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
+ const V1_0::PerformanceInfo quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
+ return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+ std::end(kQuantized8PerformanceConsistentWithP),
+ [quantized8Performance, &capabilities](V1_3::OperandType type) {
+ return quantized8Performance ==
+ lookup(capabilities.operandPerformance,
+ static_cast<V1_2::OperandType>(type));
+ });
+}
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
+ const V1_0::PerformanceInfo quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
+ return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+ std::end(kQuantized8PerformanceConsistentWithP),
+ [quantized8Performance, &capabilities](V1_3::OperandType type) {
+ return quantized8Performance ==
+ lookup(capabilities.operandPerformance, type);
+ });
+}
+
+static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
+ hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
+ std::size(kQuantized8PerformanceConsistentWithP));
+ std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
+ std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
+ [quantized8Performance](
+ V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
+ return {static_cast<V1_2::OperandType>(type), quantized8Performance};
+ });
+ return ret;
+}
+
+bool compliantWithV1_0(const V1_0::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
+ return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
+}
+
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
+ const V1_0::PerformanceInfo perfTensorFloat32 =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
+ const V1_0::PerformanceInfo perfFloat32 =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
+ if (perfTensorFloat32 != perfFloat32 ||
+ perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+ perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+ return false;
+ }
+
+ return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
+ const V1_0::PerformanceInfo perfTensorFloat32 =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
+ const V1_0::PerformanceInfo perfFloat32 =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
+ if (perfTensorFloat32 != perfFloat32 ||
+ perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+ perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+ return false;
+ }
+
+ return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_0::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_1(const V1_1::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
+ if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+ capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+ (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
+ lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
+ return false;
+ }
+
+ return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
+ if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+ capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+ (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
+ lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
+ return false;
+ }
+
+ return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_2(const V1_0::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_1::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_2::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_3::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_0::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_1::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_2::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_3::Capabilities&) {
+ return true;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
+ return status;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
+ switch (status) {
+ case V1_3::ErrorStatus::NONE:
+ return V1_0::ErrorStatus::NONE;
+ case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
+ return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
+ case V1_3::ErrorStatus::GENERAL_FAILURE:
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+ return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+ case V1_3::ErrorStatus::INVALID_ARGUMENT:
+ return V1_0::ErrorStatus::INVALID_ARGUMENT;
+ case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+ LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
+ return static_cast<V1_3::ErrorStatus>(status);
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
+ return status;
+}
+
+static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
+ return static_cast<V1_0::OperationType>(type);
+}
+
+static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
+ return static_cast<V1_0::OperationType>(type);
+}
+
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
+ return static_cast<V1_0::OperationType>(type);
+}
+
+static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
+ return static_cast<V1_1::OperationType>(type);
+}
+
+static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
+ return static_cast<V1_1::OperationType>(type);
+}
+
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
+ return static_cast<V1_1::OperationType>(type);
+}
+
+static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
+ return static_cast<V1_2::OperationType>(type);
+}
+
+static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
+ return static_cast<V1_2::OperationType>(type);
+}
+
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
+ return static_cast<V1_2::OperationType>(type);
+}
+
+static V1_3::OperationType convertToV1_3(V1_0::OperationType type) {
+ return static_cast<V1_3::OperationType>(type);
+}
+
+static V1_3::OperationType convertToV1_3(V1_1::OperationType type) {
+ return static_cast<V1_3::OperationType>(type);
+}
+
+static V1_3::OperationType convertToV1_3(V1_2::OperationType type) {
+ return static_cast<V1_3::OperationType>(type);
+}
+
+V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
+ return capabilities;
+}
+
+V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
+ if (!compliantWithV1_0(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_1::Capabilities to V1_0::Capabilities";
+ }
+ return {.float32Performance = capabilities.float32Performance,
+ .quantized8Performance = capabilities.quantized8Performance};
+}
+
+V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
+ if (!compliantWithV1_0(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_2::Capabilities to V1_0::Capabilities";
+ }
+ return {.float32Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance = lookup(capabilities.operandPerformance,
+ V1_2::OperandType::TENSOR_QUANT8_ASYMM)};
+}
+
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
+ if (!compliantWithV1_0(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_3::Capabilities to V1_0::Capabilities";
+ }
+ return {.float32Performance =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance = lookup(capabilities.operandPerformance,
+ V1_3::OperandType::TENSOR_QUANT8_ASYMM)};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
+ return {.float32Performance = capabilities.float32Performance,
+ .quantized8Performance = capabilities.quantized8Performance,
+ .relaxedFloat32toFloat16Performance = capabilities.float32Performance};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
+ return capabilities;
+}
+
+V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
+ if (!compliantWithV1_1(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_2::Capabilities to V1_1::Capabilities";
+ }
+ return {.float32Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM),
+ .relaxedFloat32toFloat16Performance =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
+ if (!compliantWithV1_1(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_3::Capabilities to V1_1::Capabilities";
+ }
+ return {.float32Performance =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM),
+ .relaxedFloat32toFloat16Performance =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor};
+}
+
+V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
+ V1_2::Capabilities ret = {
+ .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
+ .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance,
+ .operandPerformance =
+ makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
+ auto& opPerf = ret.operandPerformance;
+ opPerf.resize(opPerf.size() + 2);
+ opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
+ capabilities.float32Performance};
+ opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
+ using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+ std::sort(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ });
+ return ret;
+}
+
+V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
+ V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
+ capabilities.relaxedFloat32toFloat16Performance,
+ .relaxedFloat32toFloat16PerformanceTensor =
+ capabilities.relaxedFloat32toFloat16Performance,
+ .operandPerformance = makeQuantized8PerformanceConsistentWithP(
+ capabilities.quantized8Performance)};
+ auto& opPerf = ret.operandPerformance;
+ opPerf.resize(opPerf.size() + 2);
+ opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
+ capabilities.float32Performance};
+ opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
+ using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+ std::sort(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ });
+ return ret;
+}
+
+V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
+ return capabilities;
+}
+
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
+ V1_2::Capabilities ret = {
+ .relaxedFloat32toFloat16PerformanceScalar =
+ capabilities.relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor,
+ };
+ const auto& inputOpPerf = capabilities.operandPerformance;
+ hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
+ opPerfSupported.resize(inputOpPerf.size());
+ auto last =
+ std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
+ [](V1_3::Capabilities::OperandPerformance opPerf) {
+ return validOperandType(static_cast<V1_2::OperandType>(opPerf.type));
+ });
+ opPerfSupported.resize(std::distance(opPerfSupported.begin(), last));
+
+ auto& convertedOpPerf = ret.operandPerformance;
+ convertedOpPerf.resize(opPerfSupported.size());
+ std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(),
+ [](V1_3::Capabilities::OperandPerformance opPerf) {
+ return V1_2::Capabilities::OperandPerformance{
+ static_cast<V1_2::OperandType>(opPerf.type), opPerf.info};
+ });
+ return ret;
+}
+
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) {
+ return convertToV1_3(convertToV1_2(capabilities));
+}
+
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) {
+ return convertToV1_3(convertToV1_2(capabilities));
+}
+
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) {
+ V1_3::Capabilities ret = {
+ .relaxedFloat32toFloat16PerformanceScalar =
+ capabilities.relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor,
+ .ifPerformance = kNoPerformanceInfo,
+ .whilePerformance = kNoPerformanceInfo,
+ };
+ auto& opPerf = ret.operandPerformance;
+ opPerf.resize(capabilities.operandPerformance.size());
+ std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
+ opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) {
+ return V1_3::Capabilities::OperandPerformance{
+ static_cast<V1_3::OperandType>(opPerf.type), opPerf.info};
+ });
+ return ret;
+}
+
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) {
+ return capabilities;
+}
+
+static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
+ return {.type = uncheckedConvertToV1_0(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
+ return {.type = convertToV1_1(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_1::Operation> convertToV1_1(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
+ return result;
+}
+
+bool compliantWithV1_0(const V1_3::Operand& operand) {
+ return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
+ (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
+ operand.dimensions.size() != 0) &&
+ compliantWithV1_0(operand.lifetime);
+}
+
+bool compliantWithV1_2(const V1_3::Operand& operand) {
+ return validOperandType(static_cast<V1_2::OperandType>(operand.type)) &&
+ compliantWithV1_0(operand.lifetime);
+}
+
+bool compliantWithV1_3(const V1_3::Operand& operand) {
+ return true;
+}
+
+// Returns whether the main subgraph of `model` can be expressed at HAL
+// `version`. An operation is compliant when all of its input and output
+// operands are compliant (including, for CONSTANT_REFERENCE operands, the
+// memory pool they reference) and the operation itself validates at the
+// target version. If `noncompliantOperations` is non-null it must be empty
+// on entry; the indices of all failing operations are inserted into it.
+static bool compliantWith(HalVersion version, const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
+ // A boolean vector indicating whether each pool is compliant with the target HAL version.
+ std::vector<bool> isPoolCompliant(model.pools.size(), false);
+ std::transform(
+ model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
+ [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); });
+
+ // A boolean vector indicating whether each operand is compliant with the target HAL version.
+ std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
+ std::transform(model.main.operands.begin(), model.main.operands.end(),
+ isOperandCompliant.begin(),
+ [&isPoolCompliant, version](const V1_3::Operand& op) {
+ bool is_operand_compliant = false;
+ switch (version) {
+ case HalVersion::UNKNOWN:
+ is_operand_compliant = false;
+ break;
+ case HalVersion::V1_0:
+ is_operand_compliant = compliantWithV1_0(op);
+ break;
+ case HalVersion::V1_1:
+ // There is no V1_1::Operand -- both V1_0::Model
+ // and V1_1::Model use V1_0::Operand.
+ is_operand_compliant = compliantWithV1_0(op);
+ break;
+ case HalVersion::V1_2:
+ is_operand_compliant = compliantWithV1_2(op);
+ break;
+ case HalVersion::V1_3:
+ is_operand_compliant = compliantWithV1_3(op);
+ break;
+ }
+ // A constant operand is only usable if the pool holding its
+ // data is itself expressible at the target version.
+ return is_operand_compliant &&
+ !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE &&
+ !isPoolCompliant[op.location.poolIndex]);
+ });
+
+ auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec<uint32_t>& indices) {
+ return std::all_of(
+ indices.begin(), indices.end(),
+ [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
+ };
+
+ // NOTE(review): uncheckedConvert(model.main.operands) is re-evaluated for
+ // every operation, making this O(#operations * #operands); hoisting the
+ // conversion out of the lambda would avoid the repeated work.
+ auto localValidateOperation = [&model, version,
+ &allOperandsCompliant](const V1_3::Operation& op) {
+ if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
+ int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+ uncheckedConvert(model.main.operands), version);
+ return error == ANEURALNETWORKS_NO_ERROR;
+ };
+
+ if (noncompliantOperations) {
+ CHECK(noncompliantOperations->empty());
+ for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) {
+ if (!localValidateOperation(model.main.operations[idx])) {
+ noncompliantOperations->insert(idx);
+ }
+ }
+ return noncompliantOperations->empty();
+ } else {
+ return std::all_of(model.main.operations.begin(), model.main.operations.end(),
+ localValidateOperation);
+ }
+}
+
+// Model-level compliance checks. A model at version X is trivially compliant
+// with any version >= X; for downgrades the model is first lifted to V1_3 and
+// run through compliantWith() above.
+bool compliantWithV1_0(const V1_0::Model& model) {
+ return true;
+}
+
+bool compliantWithV1_0(const V1_1::Model& model) {
+ // In addition to new enumeration values being introduced in V1_1::Model, a
+ // new flag was introduced to indicate whether or not float32 data can be
+ // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
+ // flag is not relevant in whether a V1_1::Model is compliant with a
+ // V1_0::Model because all 1.0 drivers require strict calculation by default
+ // in the P NN runtime. Even if fp16 calculations are allowed, they can
+ // still be computed by a strict fp32 driver.
+ auto operands = uncheckedConvert(convertToV1_3(model.operands));
+ return std::all_of(model.operations.begin(), model.operations.end(),
+ [&operands](const V1_1::Operation& op) {
+ int error = validateOperation(
+ static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands,
+ HalVersion::V1_0);
+ return error == ANEURALNETWORKS_NO_ERROR;
+ });
+}
+
+bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations);
+}
+
+bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
+}
+
+bool compliantWithV1_1(const V1_0::Model&) {
+ return true;
+}
+
+bool compliantWithV1_1(const V1_1::Model&) {
+ return true;
+}
+
+bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations);
+}
+
+bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
+}
+
+bool compliantWithV1_2(const V1_0::Model&) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_1::Model&) {
+ return true;
+}
+
+// Trivially compliant; noncompliantOperations is intentionally left untouched.
+bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_2, model, noncompliantOperations);
+}
+
+// Mechanical Operation converters between HAL versions. Only the operation
+// type enum is re-cast; the input/output index lists are copied as-is.
+// "unchecked" variants narrow to an older version without validating that the
+// type is expressible there; "convertTo" variants widen and are always safe.
+static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
+ return {.type = uncheckedConvertToV1_0(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) {
+ return {.type = uncheckedConvertToV1_0(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
+ return {.type = uncheckedConvertToV1_1(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) {
+ return {.type = uncheckedConvertToV1_1(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
+ return {.type = convertToV1_2(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
+ return {.type = convertToV1_2(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) {
+ return {.type = uncheckedConvertToV1_2(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) {
+ return {.type = convertToV1_3(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) {
+ return {.type = convertToV1_3(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
+ return {.type = convertToV1_3(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+// Element-wise vector forms of the converters above.
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_0::Operation& operation) { return convertToV1_3(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_1::Operation& operation) { return convertToV1_3(operation); });
+ return result;
+}
+
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_2::Operation& operation) { return convertToV1_3(operation); });
+ return result;
+}
+
+// OperandType compliance helpers: a type from a newer version is compliant
+// with an older one iff its enum value is valid at that older version.
+static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
+ return validOperandType(static_cast<V1_0::OperandType>(operandType));
+}
+
+static bool compliantWithV1_0(const V1_3::OperandType& operandType) {
+ return validOperandType(static_cast<V1_0::OperandType>(operandType));
+}
+
+static bool compliantWithV1_2(const V1_3::OperandType& operandType) {
+ return validOperandType(static_cast<V1_2::OperandType>(operandType));
+}
+
+// The narrowing converters below log an error on a non-compliant value but
+// still perform the cast; callers are expected to have checked compliance.
+V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
+ if (!compliantWithV1_0(operandType)) {
+ LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+ << " from V1_2::OperandType to V1_0::OperandType";
+ }
+ return static_cast<V1_0::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
+ return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
+ if (!compliantWithV1_2(operandType)) {
+ LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+ << " from V1_3::OperandType to V1_2::OperandType";
+ }
+ return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
+ if (!compliantWithV1_0(operandType)) {
+ // NOTE(review): message says "Operand" where the siblings say
+ // "OperandType"; consider aligning the wording.
+ LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+ << " from V1_3::Operand to V1_0::Operand";
+ }
+ return static_cast<V1_0::OperandType>(operandType);
+}
+
+// OperandLifeTime compliance: the only lifetime V1_0 cannot express is
+// SUBGRAPH (introduced for control-flow in 1.3).
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) {
+ return true;
+}
+
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) {
+ return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
+}
+
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) {
+ return true;
+}
+
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) {
+ return true;
+}
+
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) {
+ return lifetime;
+}
+
+// Logs (but still casts) when narrowing a SUBGRAPH lifetime to V1_0.
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) {
+ if (!compliantWithV1_0(lifetime)) {
+ LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime)
+ << " from V1_3 to V1_0";
+ }
+ return static_cast<V1_0::OperandLifeTime>(lifetime);
+}
+
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) {
+ return static_cast<V1_3::OperandLifeTime>(lifetime);
+}
+
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) {
+ return lifetime;
+}
+
+// Operand converters. Fields common to all versions (dimensions, scale,
+// zeroPoint, location, numberOfConsumers) are copied verbatim; type/lifetime
+// go through the enum converters above. extraParams exists only from V1_2 on
+// and is dropped when narrowing to V1_0.
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
+ return {.type = convertToV1_0(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = convertToV1_0(operand.lifetime),
+ .location = operand.location};
+}
+
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
+ return {.type = convertToV1_0(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = convertToV1_0(operand.lifetime),
+ .location = operand.location};
+}
+
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
+ return {.type = convertToV1_2(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = operand.lifetime,
+ .location = operand.location};
+}
+
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
+ return {.type = convertToV1_2(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ // Plain cast (no compliance logging, unlike convertToV1_0 above);
+ // SUBGRAPH lifetimes are silently narrowed here.
+ .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
+ .location = operand.location,
+ .extraParams = operand.extraParams};
+}
+
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
+ return {.type = static_cast<V1_3::OperandType>(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = convertToV1_3(operand.lifetime),
+ .location = operand.location};
+}
+
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
+ return {.type = static_cast<V1_3::OperandType>(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = convertToV1_3(operand.lifetime),
+ .location = operand.location,
+ .extraParams = operand.extraParams};
+}
+
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
+ return operand;
+}
+
+// Element-wise vector forms; same-version overloads are pass-throughs.
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ return operands;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ hardware::hidl_vec<V1_0::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
+ return result;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ hardware::hidl_vec<V1_0::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
+ return result;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ hardware::hidl_vec<V1_2::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
+ return result;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ return operands;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ hardware::hidl_vec<V1_2::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
+ return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ hardware::hidl_vec<V1_3::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
+ return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ hardware::hidl_vec<V1_3::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
+ return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ return operands;
+}
+
+// Model converters. Narrowing converters log (but proceed) when the model is
+// not compliant with the target version; widening converters are total.
+// relaxComputationFloat32toFloat16 is defaulted to false when the source
+// version (1.0) predates the flag; extensionNameToPrefix exists from 1.2 on.
+V1_0::Model convertToV1_0(const V1_0::Model& model) {
+ return model;
+}
+
+V1_0::Model convertToV1_0(const V1_1::Model& model) {
+ if (!compliantWithV1_0(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_1::Model to V1_0::Model";
+ }
+ return {.operands = model.operands,
+ .operations = uncheckedConvertToV1_0(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools};
+}
+
+V1_0::Model convertToV1_0(const V1_2::Model& model) {
+ if (!compliantWithV1_0(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_2::Model to V1_0::Model";
+ }
+ return {.operands = convertToV1_0(model.operands),
+ .operations = uncheckedConvertToV1_0(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools};
+}
+
+// V1_3 models nest their graph in `main`; only the main subgraph survives
+// narrowing (referenced subgraphs cannot be expressed before 1.3).
+V1_0::Model convertToV1_0(const V1_3::Model& model) {
+ if (!compliantWithV1_0(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_3::Model to V1_0::Model";
+ }
+ return {.operands = convertToV1_0(model.main.operands),
+ .operations = uncheckedConvertToV1_0(model.main.operations),
+ .inputIndexes = model.main.inputIndexes,
+ .outputIndexes = model.main.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools};
+}
+
+V1_1::Model convertToV1_1(const V1_0::Model& model) {
+ return {.operands = model.operands,
+ .operations = convertToV1_1(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_1::Model convertToV1_1(const V1_1::Model& model) {
+ return model;
+}
+
+V1_1::Model convertToV1_1(const V1_2::Model& model) {
+ if (!compliantWithV1_1(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_2::Model to V1_1::Model";
+ }
+ return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical.
+ .operations = uncheckedConvertToV1_1(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_1::Model convertToV1_1(const V1_3::Model& model) {
+ if (!compliantWithV1_1(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_3::Model to V1_1::Model";
+ }
+ return {// Operands in 1.1 and 1.0 are identical.
+ .operands = convertToV1_0(model.main.operands),
+ .operations = uncheckedConvertToV1_1(model.main.operations),
+ .inputIndexes = model.main.inputIndexes,
+ .outputIndexes = model.main.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_2::Model convertToV1_2(const V1_0::Model& model) {
+ return {.operands = convertToV1_2(model.operands),
+ .operations = convertToV1_2(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_2::Model convertToV1_2(const V1_1::Model& model) {
+ return {.operands = convertToV1_2(model.operands),
+ .operations = convertToV1_2(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_2::Model convertToV1_2(const V1_2::Model& model) {
+ return model;
+}
+
+V1_2::Model convertToV1_2(const V1_3::Model& model) {
+ if (!compliantWithV1_2(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_3::Model to V1_2::Model";
+ }
+ return {.operands = convertToV1_2(model.main.operands),
+ .operations = uncheckedConvertToV1_2(model.main.operations),
+ .inputIndexes = model.main.inputIndexes,
+ .outputIndexes = model.main.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+ .extensionNameToPrefix = model.extensionNameToPrefix};
+}
+
+V1_3::Model convertToV1_3(const V1_0::Model& model) {
+ return {.main = {.operands = convertToV1_3(model.operands),
+ .operations = convertToV1_3(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes},
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_3::Model convertToV1_3(const V1_1::Model& model) {
+ return {.main = {.operands = convertToV1_3(model.operands),
+ .operations = convertToV1_3(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes},
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_3::Model convertToV1_3(const V1_2::Model& model) {
+ return {.main = {.operands = convertToV1_3(model.operands),
+ .operations = convertToV1_3(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes},
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+ .extensionNameToPrefix = model.extensionNameToPrefix};
+}
+
+V1_3::Model convertToV1_3(const V1_3::Model& model) {
+ return model;
+}
+
+// Request compliance and converters. Pre-1.3 requests can only carry plain
+// hidl_memory pools, so compliance is decided by pool discriminator and by
+// which memory names the target version understands.
+bool compliantWithV1_0(const V1_0::Request& request) {
+ return true;
+}
+
+bool compliantWithV1_0(const V1_3::Request& request) {
+ return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
+ if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
+ return false;
+ }
+ const auto& name = pool.hidlMemory().name();
+ return name == "ashmem" || name == "mmap_fd";
+ });
+}
+
+// Same as above but 1.2 additionally understands AHardwareBuffer-backed pools.
+bool compliantWithV1_2(const V1_3::Request& request) {
+ return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
+ if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
+ return false;
+ }
+ const auto& name = pool.hidlMemory().name();
+ return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" ||
+ name == "hardware_buffer";
+ });
+}
+
+// A driver-managed-buffer token has no hidl_memory equivalent, so it becomes
+// an empty memory. The switch is exhaustive over the discriminator, hence no
+// trailing return (compilers may still warn; relies on the enum being closed).
+static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
+ switch (pool.getDiscriminator()) {
+ case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
+ return pool.hidlMemory();
+ case V1_3::Request::MemoryPool::hidl_discriminator::token:
+ return hardware::hidl_memory{};
+ }
+}
+
+static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) {
+ V1_3::Request::MemoryPool ret;
+ ret.hidlMemory(pool);
+ return ret;
+}
+
+V1_0::Request convertToV1_0(const V1_0::Request& request) {
+ return request;
+}
+
+static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
+ hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size());
+ std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
+ [](const auto& pool) { return convertToV1_0(pool); });
+ return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
+}
+
+V1_0::Request convertToV1_0(const V1_3::Request& request) {
+ if (!compliantWithV1_0(request)) {
+ LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
+ << " from V1_3::Request to V1_0::Request of version 1.0";
+ }
+ return uncheckedConvertToV1_0(request);
+}
+
+// Deliberately returns V1_0::Request: HAL 1.2 reuses the 1.0 Request struct,
+// only the compliance check differs.
+V1_0::Request convertToV1_2(const V1_3::Request& request) {
+ if (!compliantWithV1_2(request)) {
+ LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
+ << " from V1_3::Request to V1_0::Request of version 1.2";
+ }
+ return uncheckedConvertToV1_0(request);
+}
+
+V1_3::Request convertToV1_3(const V1_0::Request& request) {
+ hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
+ std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
+ [](const auto& pool) { return convertToV1_3(pool); });
+ return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
+}
+
+V1_3::Request convertToV1_3(const V1_3::Request& request) {
+ return request;
+}
+
+// HAL -> canonical "unchecked" wrappers. Each delegates to the checked
+// convert() from the nnapi conversion library and unwraps the result with
+// nnTryGetValue (failure handling is defined by that helper, not here).
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) {
+ return nnTryGetValue(convert(status));
+}
+
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) {
+ return nnTryGetValue(convert(status));
+}
+
+OperandType uncheckedConvert(V1_3::OperandType operandType) {
+ return nnTryGetValue(convert(operandType));
+}
+
+OperationType uncheckedConvert(V1_3::OperationType operandType) {
+ return nnTryGetValue(convert(operandType));
+}
+
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) {
+ return nnTryGetValue(convert(lifetime));
+}
+
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) {
+ return nnTryGetValue(convert(measure));
+}
+
+DataLocation uncheckedConvert(const V1_0::DataLocation& location) {
+ return nnTryGetValue(convert(location));
+}
+
+Operand uncheckedConvert(const V1_3::Operand& operand) {
+ return nnTryGetValue(convert(operand));
+}
+
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) {
+ return nnTryGetValue(convert(params));
+}
+
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) {
+ return nnTryGetValue(convert(params));
+}
+
+// Extension params are an opaque byte blob in both representations.
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) {
+ return params;
+}
+
+Operation uncheckedConvert(const V1_3::Operation& operation) {
+ return nnTryGetValue(convert(operation));
+}
+
+// Maps uncheckedConvert over a hidl_vec, producing a std::vector of the
+// corresponding canonical type.
+template <typename CanonicalType, typename HalType>
+static std::vector<CanonicalType> convertVec(const hardware::hidl_vec<HalType>& items) {
+ std::vector<CanonicalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const HalType& item) { return uncheckedConvert(item); });
+ return result;
+}
+
+Model uncheckedConvert(const V1_3::Model& model) {
+ return nnTryGetValue(convert(model));
+}
+
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) {
+ return nnTryGetValue(convert(subgraph));
+}
+
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) {
+ return nnTryGetValue(convert(x));
+}
+
+Request uncheckedConvert(const V1_3::Request& request) {
+ return nnTryGetValue(convert(request));
+}
+
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) {
+ return nnTryGetValue(convert(requestArgument));
+}
+
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) {
+ return nnTryGetValue(convert(memoryPool));
+}
+
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) {
+ return nnTryGetValue(convert(outputShape));
+}
+
+std::vector<OutputShape> uncheckedConvert(
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
+ return convertVec<OutputShape>(outputShapes);
+}
+
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) {
+ return nnTryGetValue(convert(capabilities));
+}
+
+Capabilities::OperandPerformance uncheckedConvert(
+ const V1_3::Capabilities::OperandPerformance& operandPerformance) {
+ return nnTryGetValue(convert(operandPerformance));
+}
+
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
+ return nnTryGetValue(convert(performanceInfo));
+}
+
+Extension uncheckedConvert(const V1_2::Extension& extension) {
+ return nnTryGetValue(convert(extension));
+}
+
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
+ return convertVec<Extension>(extensions);
+}
+
+Extension::OperandTypeInformation uncheckedConvert(
+ const V1_2::Extension::OperandTypeInformation& info) {
+ return nnTryGetValue(convert(info));
+}
+
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
+ return nnTryGetValue(convert(timeoutDuration));
+}
+
+Timing uncheckedConvert(const V1_2::Timing& timing) {
+ return nnTryGetValue(convert(timing));
+}
+
+// Canonical -> HAL converters, delegating to the per-version utils::convert
+// libraries and unwrapping with nnTryGetValue.
+// NOTE(review): this first overload bypasses the checked-conversion path the
+// siblings use and double-casts through int instead — confirm whether a
+// checked V1_0 conversion was intentionally avoided here.
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
+ return static_cast<V1_0::ErrorStatus>(static_cast<int>(status));
+}
+
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
+ return nnTryGetValue(V1_3::utils::convert(status));
+}
+
+V1_3::OperandType convertToV1_3(OperandType operandType) {
+ return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperationType convertToV1_3(OperationType operandType) {
+ return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
+ return nnTryGetValue(V1_3::utils::convert(lifetime));
+}
+
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
+ return nnTryGetValue(V1_1::utils::convert(preference));
+}
+
+V1_3::Priority convertToV1_3(Priority priority) {
+ return nnTryGetValue(V1_3::utils::convert(priority));
+}
+
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
+ return nnTryGetValue(V1_2::utils::convert(measure));
+}
+
+V1_0::DataLocation convertToV1_0(const DataLocation& location) {
+ return nnTryGetValue(V1_0::utils::convert(location));
+}
+
+V1_3::Operand convertToV1_3(const Operand& operand) {
+ return nnTryGetValue(V1_3::utils::convert(operand));
+}
+
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
+ return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
+ return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+// Opaque byte blob in both representations; copied through unchanged.
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params) {
+ return params;
+}
+
+V1_3::Operation convertToV1_3(const Operation& operation) {
+ return nnTryGetValue(V1_3::utils::convert(operation));
+}
+
+// Map a canonical std::vector into a hidl_vec via the per-version converters.
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_0(item); });
+ return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_2(item); });
+ return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_3(item); });
+ return result;
+}
+
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) {
+ return nnTryGetValue(V1_2::utils::convert(outputShape));
+}
+
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) {
+ return convertVecToV1_2<V1_2::OutputShape>(outputShapes);
+}
+
+V1_3::Model convertToV1_3(const Model& model) {
+ return nnTryGetValue(V1_3::utils::convert(model));
+}
+
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) {
+ return nnTryGetValue(V1_3::utils::convert(subgraph));
+}
+
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) {
+ return nnTryGetValue(V1_2::utils::convert(x));
+}
+
+V1_3::Request convertToV1_3(const Request& request) {
+ return nnTryGetValue(V1_3::utils::convert(request));
+}
+
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) {
+ return nnTryGetValue(V1_0::utils::convert(requestArgument));
+}
+
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) {
+ return nnTryGetValue(V1_3::utils::convert(memoryPool));
+}
+
+std::vector<Request::MemoryPool> uncheckedConvert(
+ const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) {
+ return convertVec<Request::MemoryPool>(memoryPools);
+}
+
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) {
+ return nnTryGetValue(V1_3::utils::convert(timePoint));
+}
+
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) {
+ return nnTryGetValue(V1_3::utils::convert(timeoutDuration));
+}
+
+V1_2::Timing convertToV1_2(const Timing& timing) {
+ return nnTryGetValue(V1_2::utils::convert(timing));
+}
+
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) {
+ return nnTryGetValue(V1_3::utils::convert(bufferRole));
+}
+
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) {
+ return convertVecToV1_3<V1_3::BufferRole>(bufferRoles);
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) {
+ return nnTryGetValue(V1_0::utils::convert(operandValues));
+}
+
+hardware::hidl_memory convertToV1_0(const Memory& memory) {
+ return nnTryGetValue(V1_0::utils::convert(memory));
+}
+
+Memory uncheckedConvert(const hardware::hidl_memory& memory) {
+ return nnTryGetValue(convert(memory));
+}
+
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories) {
+ return convertVecToV1_0<hardware::hidl_memory>(memories);
+}
+
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories) {
+ return convertVec<Memory>(memories);
+}
+
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) {
+ return convertVec<Model::Subgraph>(subgraphs);
+}
+
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ return convertVec<Operand>(operands);
+}
+
+} // namespace nn
+} // namespace android
diff --git a/nn/common/Utils.cpp b/nn/common/LegacyUtils.cpp
index 7417ed8bf..52acda864 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/LegacyUtils.cpp
@@ -16,25 +16,19 @@
#define LOG_TAG "Utils"
-#include "Utils.h"
+#include "LegacyUtils.h"
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <errno.h>
-#include <nnapi/hal/1.0/Conversions.h>
-#include <nnapi/hal/1.1/Conversions.h>
-#include <nnapi/hal/1.2/Conversions.h>
-#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/TypeUtils.h>
#include <poll.h>
#include <algorithm>
-#include <cfloat>
#include <functional>
-#include <iostream>
#include <limits>
#include <numeric>
-#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
@@ -45,14 +39,10 @@
#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"
-#include "ValidateHal.h"
-#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
-
const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
@@ -123,29 +113,6 @@ Deadline makeDeadline(TimeoutDuration duration) {
return currentTime + duration;
}
-static uint64_t getMaxNanosecondsSinceEpoch() {
- const auto maxTime =
- std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds>::max();
- return maxTime.time_since_epoch().count();
-}
-
-std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint) {
- using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator;
- if (timePoint.getDiscriminator() == Discriminator::none) {
- return std::nullopt;
- }
- const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch();
- const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch();
-
- // Clamp time point to max.
- if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) {
- return Deadline::max();
- }
-
- // Return provided time point.
- return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}};
-}
-
bool hasDeadlinePassed(const std::optional<Deadline>& deadline) {
if (!deadline.has_value()) {
return false;
@@ -169,11 +136,11 @@ static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}
-bool isExtensionOperandType(V1_3::OperandType type) {
+bool isExtensionOperandType(OperandType type) {
return isExtensionOperandType(static_cast<int32_t>(type));
}
-bool isExtensionOperationType(V1_3::OperationType type) {
+bool isExtensionOperationType(OperationType type) {
return isExtensionOperationType(static_cast<int32_t>(type));
}
@@ -297,14 +264,6 @@ Shape OperationValidationContext::getOutputShape(uint32_t index) const {
#define COUNT(X) (sizeof(X) / sizeof(X[0]))
-std::string getOperandTypeName(V1_3::OperandType type) {
- return toString(type);
-}
-
-std::string getOperationName(V1_3::OperationType type) {
- return toString(type);
-}
-
const uint32_t kSizeOfDataType[]{
4, // ANEURALNETWORKS_FLOAT32
4, // ANEURALNETWORKS_INT32
@@ -374,11 +333,6 @@ uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint3
return size;
}
-uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
- const std::vector<uint32_t>& dimensions) {
- return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions);
-}
-
// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t.
static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement,
const std::vector<uint32_t>& dimensions) {
@@ -410,11 +364,6 @@ bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
: sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
}
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
- const std::vector<uint32_t>& dimensions) {
- return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions);
-}
-
bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
const std::vector<uint32_t>& dimensions) {
return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
@@ -433,12 +382,6 @@ bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t
dimensions.size());
}
-bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
- const std::vector<uint32_t>& dimensions) {
- return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
- dimensions.size());
-}
-
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}
@@ -447,11 +390,6 @@ bool tensorHasUnspecifiedDimensions(const Operand& operand) {
return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions);
}
-bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) {
- return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
- operand.dimensions.size());
-}
-
uint32_t alignBytesNeeded(uint32_t index, size_t length) {
uint32_t pattern;
if (length < 2) {
@@ -465,78 +403,8 @@ uint32_t alignBytesNeeded(uint32_t index, size_t length) {
return extra;
}
-void logModelToInfo(const V1_0::Model& model) {
- LOG(INFO) << "V1_0::Model start";
- LOG(INFO) << "operands" << toString(model.operands);
- LOG(INFO) << "operations" << toString(model.operations);
- LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
- LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
- LOG(INFO) << "operandValues size" << model.operandValues.size();
- LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-}
-
-void logModelToInfo(const V1_1::Model& model) {
- LOG(INFO) << "V1_1::Model start";
- LOG(INFO) << "operands" << toString(model.operands);
- LOG(INFO) << "operations" << toString(model.operations);
- LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
- LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
- LOG(INFO) << "operandValues size " << model.operandValues.size();
- LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-}
-
-void logModelToInfo(const V1_2::Model& model) {
- LOG(INFO) << "V1_2::Model start";
- LOG(INFO) << "operands" << toString(model.operands);
- LOG(INFO) << "operations" << toString(model.operations);
- LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
- LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
- LOG(INFO) << "operandValues size" << model.operandValues.size();
- LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
- LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
- LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
-}
-
-static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) {
- LOG(INFO) << label << ".operands" << toString(subgraph.operands);
- LOG(INFO) << label << ".operations" << toString(subgraph.operations);
- LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes);
- LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes);
-}
-
-void logModelToInfo(const V1_3::Model& model) {
- LOG(INFO) << "V1_3::Model start";
- logSubgraphToInfo("main", model.main);
- for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) {
- logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]);
- }
- LOG(INFO) << "operandValues size " << model.operandValues.size();
- LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
- LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
- LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
-}
-
void logModelToInfo(const Model& model) {
- LOG(INFO) << "Model start";
- logModelToInfo(convertToV1_3(model));
-}
-
-bool validateOperandSymmPerChannelQuantParams(
- const V1_3::Operand& halOperand,
- const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) {
- if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- return false;
- }
-
- NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
- NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
- NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
- NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
- << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
- for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
- NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
- }
- return true;
+ LOG(INFO) << model;
}
static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
@@ -871,19 +739,6 @@ static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
return true;
}
-static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
- const uint32_t* inputIndexes, uint32_t outputCount,
- const uint32_t* outputIndexes,
- const std::vector<Operand>& operands, HalVersion halVersion) {
- if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
- NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
- LOG(ERROR) << "This validateOperation() overload does not support control flow";
- return ANEURALNETWORKS_BAD_DATA;
- }
- return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands,
- halVersion, {});
-}
-
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
const uint32_t* inputIndexes, uint32_t outputCount,
const uint32_t* outputIndexes, const std::vector<Operand>& operands,
@@ -1571,7 +1426,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
}
// Validate that output shape is equal to input shape if dimensions
// are already known.
- auto getNumberOfElements = [](const hardware::hidl_vec<uint32_t>& dims) {
+ auto getNumberOfElements = [](const std::vector<uint32_t>& dims) {
if (dims.size() == 0) {
return 0;
}
@@ -1955,21 +1810,6 @@ int convertErrorStatusToResultCode(ErrorStatus status) {
return ANEURALNETWORKS_OP_FAILED;
}
-V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
- return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
-}
-
-int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
- return convertErrorStatusToResultCode(uncheckedConvert(status));
-}
-
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
- V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
- const V1_2::Timing& timing) {
- return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
- uncheckedConvert(timing));
-}
-
std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
@@ -1987,1246 +1827,6 @@ std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
return {n, std::move(outputShapes), timing};
}
-// Capabilities::operandPerformance utilities.
-// The field Capabilities::operandPerformance is a vector sorted by the field
-// Capabilities::OperandPerformance::type.
-
-template <HalVersion version>
-hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
- V1_0::PerformanceInfo perf) {
- using OpPerf = VersionedOperandPerformance<version>;
-
- // Note: range presents enumerators in declaration order, not in numerical order.
- static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
-
- std::vector<OpPerf> ret;
- ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
- for (VersionedOperandType<version> type : kOperandTypeRange) {
- if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
- ret.push_back(OpPerf{type, perf});
- }
- }
- std::sort(ret.begin(), ret.end(),
- [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
-
- return ret;
-}
-
-template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
-template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
-
-template <HalVersion version>
-void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
- VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
- CHECK(operandPerformance != nullptr);
- const auto it =
- std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
- [](const VersionedOperandPerformance<version>& perf,
- VersionedOperandType<version> type) { return perf.type < type; });
- CHECK(it != operandPerformance->end())
- << toString(type) << " not in " << toString(*operandPerformance);
- it->info = perf;
-}
-
-void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
- V1_2::OperandType type, V1_0::PerformanceInfo perf) {
- update<HalVersion::V1_2>(operandPerformance, type, perf);
-}
-void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
- V1_3::OperandType type, V1_0::PerformanceInfo perf) {
- update<HalVersion::V1_3>(operandPerformance, type, perf);
-}
-
-template <HalVersion version>
-V1_0::PerformanceInfo lookup(
- const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
- VersionedOperandType<version> type) {
- const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
- [](const VersionedOperandPerformance<version>& perf,
- VersionedOperandType<version> type) {
- return static_cast<V1_3::OperandType>(perf.type) <
- static_cast<V1_3::OperandType>(type);
- });
- if (it == operandPerformance.end()) {
- LOG(WARNING) << "No PerformanceInfo for " << toString(type);
- return kNoPerformanceInfo;
- } else {
- return it->info;
- }
-}
-
-V1_0::PerformanceInfo lookup(
- const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
- V1_2::OperandType type) {
- return lookup<HalVersion::V1_2>(operandPerformance, type);
-}
-V1_0::PerformanceInfo lookup(
- const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
- V1_3::OperandType type) {
- CHECK(type != V1_3::OperandType::SUBGRAPH)
- << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
- return lookup<HalVersion::V1_3>(operandPerformance, type);
-}
-
-// Versioning
-
-// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
-// This array must be in sorted order.
-static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
- V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
- V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
-
-static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
- const V1_0::PerformanceInfo quantized8Performance =
- lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
- return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
- std::end(kQuantized8PerformanceConsistentWithP),
- [quantized8Performance, &capabilities](V1_3::OperandType type) {
- return quantized8Performance ==
- lookup(capabilities.operandPerformance,
- static_cast<V1_2::OperandType>(type));
- });
-}
-
-static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
- const V1_0::PerformanceInfo quantized8Performance =
- lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
- return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
- std::end(kQuantized8PerformanceConsistentWithP),
- [quantized8Performance, &capabilities](V1_3::OperandType type) {
- return quantized8Performance ==
- lookup(capabilities.operandPerformance, type);
- });
-}
-
-static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
-makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
- hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
- std::size(kQuantized8PerformanceConsistentWithP));
- std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
- std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
- [quantized8Performance](
- V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
- return {static_cast<V1_2::OperandType>(type), quantized8Performance};
- });
- return ret;
-}
-
-bool compliantWithV1_0(const V1_0::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
- return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
-}
-
-bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
- const V1_0::PerformanceInfo perfTensorFloat32 =
- lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
- const V1_0::PerformanceInfo perfFloat32 =
- lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
- if (perfTensorFloat32 != perfFloat32 ||
- perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
- perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
- return false;
- }
-
- return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
- const V1_0::PerformanceInfo perfTensorFloat32 =
- lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
- const V1_0::PerformanceInfo perfFloat32 =
- lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
- if (perfTensorFloat32 != perfFloat32 ||
- perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
- perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
- return false;
- }
-
- return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_1(const V1_0::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_1(const V1_1::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
- if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
- capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
- (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
- lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
- return false;
- }
-
- return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
- if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
- capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
- (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
- lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
- return false;
- }
-
- return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_2(const V1_0::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_2(const V1_1::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_2(const V1_2::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_2(const V1_3::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_3(const V1_0::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_3(const V1_1::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_3(const V1_2::Capabilities&) {
- return true;
-}
-
-bool compliantWithV1_3(const V1_3::Capabilities&) {
- return true;
-}
-
-V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
- return status;
-}
-
-V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
- switch (status) {
- case V1_3::ErrorStatus::NONE:
- return V1_0::ErrorStatus::NONE;
- case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
- return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
- case V1_3::ErrorStatus::GENERAL_FAILURE:
- return V1_0::ErrorStatus::GENERAL_FAILURE;
- case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
- return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
- case V1_3::ErrorStatus::INVALID_ARGUMENT:
- return V1_0::ErrorStatus::INVALID_ARGUMENT;
- case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
- return V1_0::ErrorStatus::GENERAL_FAILURE;
- case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
- return V1_0::ErrorStatus::GENERAL_FAILURE;
- case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
- return V1_0::ErrorStatus::GENERAL_FAILURE;
- case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
- return V1_0::ErrorStatus::GENERAL_FAILURE;
- }
- LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
- return V1_0::ErrorStatus::GENERAL_FAILURE;
-}
-
-V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
- return static_cast<V1_3::ErrorStatus>(status);
-}
-
-V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
- return status;
-}
-
-static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
- return static_cast<V1_0::OperationType>(type);
-}
-
-static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
- return static_cast<V1_0::OperationType>(type);
-}
-
-V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
- return static_cast<V1_0::OperationType>(type);
-}
-
-static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
- return static_cast<V1_1::OperationType>(type);
-}
-
-static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
- return static_cast<V1_1::OperationType>(type);
-}
-
-V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
- return static_cast<V1_1::OperationType>(type);
-}
-
-static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
- return static_cast<V1_2::OperationType>(type);
-}
-
-static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
- return static_cast<V1_2::OperationType>(type);
-}
-
-V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
- return static_cast<V1_2::OperationType>(type);
-}
-
-static V1_3::OperationType convertToV1_3(V1_0::OperationType type) {
- return static_cast<V1_3::OperationType>(type);
-}
-
-static V1_3::OperationType convertToV1_3(V1_1::OperationType type) {
- return static_cast<V1_3::OperationType>(type);
-}
-
-static V1_3::OperationType convertToV1_3(V1_2::OperationType type) {
- return static_cast<V1_3::OperationType>(type);
-}
-
-V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
- return capabilities;
-}
-
-V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
- if (!compliantWithV1_0(capabilities)) {
- LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
- << " from V1_1::Capabilities to V1_0::Capabilities";
- }
- return {.float32Performance = capabilities.float32Performance,
- .quantized8Performance = capabilities.quantized8Performance};
-}
-
-V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
- if (!compliantWithV1_0(capabilities)) {
- LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
- << " from V1_2::Capabilities to V1_0::Capabilities";
- }
- return {.float32Performance =
- lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
- .quantized8Performance = lookup(capabilities.operandPerformance,
- V1_2::OperandType::TENSOR_QUANT8_ASYMM)};
-}
-
-V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
- if (!compliantWithV1_0(capabilities)) {
- LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
- << " from V1_3::Capabilities to V1_0::Capabilities";
- }
- return {.float32Performance =
- lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
- .quantized8Performance = lookup(capabilities.operandPerformance,
- V1_3::OperandType::TENSOR_QUANT8_ASYMM)};
-}
-
-V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
- return {.float32Performance = capabilities.float32Performance,
- .quantized8Performance = capabilities.quantized8Performance,
- .relaxedFloat32toFloat16Performance = capabilities.float32Performance};
-}
-
-V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
- return capabilities;
-}
-
-V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
- if (!compliantWithV1_1(capabilities)) {
- LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
- << " from V1_2::Capabilities to V1_1::Capabilities";
- }
- return {.float32Performance =
- lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
- .quantized8Performance =
- lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM),
- .relaxedFloat32toFloat16Performance =
- capabilities.relaxedFloat32toFloat16PerformanceTensor};
-}
-
-V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
- if (!compliantWithV1_1(capabilities)) {
- LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
- << " from V1_3::Capabilities to V1_1::Capabilities";
- }
- return {.float32Performance =
- lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
- .quantized8Performance =
- lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM),
- .relaxedFloat32toFloat16Performance =
- capabilities.relaxedFloat32toFloat16PerformanceTensor};
-}
-
-V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
- V1_2::Capabilities ret = {
- .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
- .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance,
- .operandPerformance =
- makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
- auto& opPerf = ret.operandPerformance;
- opPerf.resize(opPerf.size() + 2);
- opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
- capabilities.float32Performance};
- opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
- using OperandPerformance = V1_2::Capabilities::OperandPerformance;
- std::sort(opPerf.begin(), opPerf.end(),
- [](const OperandPerformance& a, const OperandPerformance& b) {
- return a.type < b.type;
- });
- return ret;
-}
-
-V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
- V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
- capabilities.relaxedFloat32toFloat16Performance,
- .relaxedFloat32toFloat16PerformanceTensor =
- capabilities.relaxedFloat32toFloat16Performance,
- .operandPerformance = makeQuantized8PerformanceConsistentWithP(
- capabilities.quantized8Performance)};
- auto& opPerf = ret.operandPerformance;
- opPerf.resize(opPerf.size() + 2);
- opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
- capabilities.float32Performance};
- opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
- using OperandPerformance = V1_2::Capabilities::OperandPerformance;
- std::sort(opPerf.begin(), opPerf.end(),
- [](const OperandPerformance& a, const OperandPerformance& b) {
- return a.type < b.type;
- });
- return ret;
-}
-
-V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
- return capabilities;
-}
-
-V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
- V1_2::Capabilities ret = {
- .relaxedFloat32toFloat16PerformanceScalar =
- capabilities.relaxedFloat32toFloat16PerformanceScalar,
- .relaxedFloat32toFloat16PerformanceTensor =
- capabilities.relaxedFloat32toFloat16PerformanceTensor,
- };
- const auto& inputOpPerf = capabilities.operandPerformance;
- hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
- opPerfSupported.resize(inputOpPerf.size());
- auto last =
- std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
- [](V1_3::Capabilities::OperandPerformance opPerf) {
- return validOperandType(static_cast<V1_2::OperandType>(opPerf.type));
- });
- opPerfSupported.resize(std::distance(opPerfSupported.begin(), last));
-
- auto& convertedOpPerf = ret.operandPerformance;
- convertedOpPerf.resize(opPerfSupported.size());
- std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(),
- [](V1_3::Capabilities::OperandPerformance opPerf) {
- return V1_2::Capabilities::OperandPerformance{
- static_cast<V1_2::OperandType>(opPerf.type), opPerf.info};
- });
- return ret;
-}
-
-V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) {
- return convertToV1_3(convertToV1_2(capabilities));
-}
-
-V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) {
- return convertToV1_3(convertToV1_2(capabilities));
-}
-
-V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) {
- V1_3::Capabilities ret = {
- .relaxedFloat32toFloat16PerformanceScalar =
- capabilities.relaxedFloat32toFloat16PerformanceScalar,
- .relaxedFloat32toFloat16PerformanceTensor =
- capabilities.relaxedFloat32toFloat16PerformanceTensor,
- .ifPerformance = kNoPerformanceInfo,
- .whilePerformance = kNoPerformanceInfo,
- };
- auto& opPerf = ret.operandPerformance;
- opPerf.resize(capabilities.operandPerformance.size());
- std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
- opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) {
- return V1_3::Capabilities::OperandPerformance{
- static_cast<V1_3::OperandType>(opPerf.type), opPerf.info};
- });
- return ret;
-}
-
-V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) {
- return capabilities;
-}
-
-static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
- return {.type = uncheckedConvertToV1_0(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
- return {.type = convertToV1_1(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hardware::hidl_vec<V1_1::Operation>& operations) {
- hardware::hidl_vec<V1_0::Operation> result(operations.size());
- std::transform(
- operations.begin(), operations.end(), result.begin(),
- [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_1::Operation> convertToV1_1(
- const hardware::hidl_vec<V1_0::Operation>& operations) {
- hardware::hidl_vec<V1_1::Operation> result(operations.size());
- std::transform(operations.begin(), operations.end(), result.begin(),
- [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
- return result;
-}
-
-bool compliantWithV1_0(const V1_3::Operand& operand) {
- return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
- (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
- operand.dimensions.size() != 0) &&
- compliantWithV1_0(operand.lifetime);
-}
-
-bool compliantWithV1_2(const V1_3::Operand& operand) {
- return validOperandType(static_cast<V1_2::OperandType>(operand.type)) &&
- compliantWithV1_0(operand.lifetime);
-}
-
-bool compliantWithV1_3(const V1_3::Operand& operand) {
- return true;
-}
-
-static bool compliantWith(HalVersion version, const V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
- // A boolean vector indicating whether each pool is compliant with the target HAL version.
- std::vector<bool> isPoolCompliant(model.pools.size(), false);
- std::transform(
- model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
- [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); });
-
- // A boolean vector indicating whether each operand is compliant with the target HAL version.
- std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
- std::transform(model.main.operands.begin(), model.main.operands.end(),
- isOperandCompliant.begin(),
- [&isPoolCompliant, version](const V1_3::Operand& op) {
- bool is_operand_compliant = false;
- switch (version) {
- case HalVersion::UNKNOWN:
- is_operand_compliant = false;
- break;
- case HalVersion::V1_0:
- is_operand_compliant = compliantWithV1_0(op);
- break;
- case HalVersion::V1_1:
- // There is no V1_1::Operand -- both V1_0::Model
- // and V1_1::Model use V1_0::Operand.
- is_operand_compliant = compliantWithV1_0(op);
- break;
- case HalVersion::V1_2:
- is_operand_compliant = compliantWithV1_2(op);
- break;
- case HalVersion::V1_3:
- is_operand_compliant = compliantWithV1_3(op);
- break;
- }
- return is_operand_compliant &&
- !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE &&
- !isPoolCompliant[op.location.poolIndex]);
- });
-
- auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec<uint32_t>& indices) {
- return std::all_of(
- indices.begin(), indices.end(),
- [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
- };
-
- auto localValidateOperation = [&model, version,
- &allOperandsCompliant](const V1_3::Operation& op) {
- if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
- int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr,
- op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr,
- uncheckedConvert(model.main.operands), version);
- return error == ANEURALNETWORKS_NO_ERROR;
- };
-
- if (noncompliantOperations) {
- CHECK(noncompliantOperations->empty());
- for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) {
- if (!localValidateOperation(model.main.operations[idx])) {
- noncompliantOperations->insert(idx);
- }
- }
- return noncompliantOperations->empty();
- } else {
- return std::all_of(model.main.operations.begin(), model.main.operations.end(),
- localValidateOperation);
- }
-}
-
-bool compliantWithV1_0(const V1_0::Model& model) {
- return true;
-}
-
-bool compliantWithV1_0(const V1_1::Model& model) {
- // In addition to new enumeration values being introduced in V1_1::Model, a
- // new flag was introduced to indicate whether or not float32 data can be
- // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
- // flag is not relevant in whether a V1_1::Model is compliant with a
- // V1_0::Model because all 1.0 drivers require strict calculation by default
- // in the P NN runtime. Even if fp16 calculations are allowed, they can
- // still be computed by a strict fp32 driver.
- auto operands = uncheckedConvert(convertToV1_3(model.operands));
- return std::all_of(model.operations.begin(), model.operations.end(),
- [&operands](const V1_1::Operation& op) {
- int error = validateOperation(
- static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr,
- op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands,
- HalVersion::V1_0);
- return error == ANEURALNETWORKS_NO_ERROR;
- });
-}
-
-bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
- return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations);
-}
-
-bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
- return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
-}
-
-bool compliantWithV1_1(const V1_0::Model&) {
- return true;
-}
-
-bool compliantWithV1_1(const V1_1::Model&) {
- return true;
-}
-
-bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
- return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations);
-}
-
-bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
- return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
-}
-
-bool compliantWithV1_2(const V1_0::Model&) {
- return true;
-}
-
-bool compliantWithV1_2(const V1_1::Model&) {
- return true;
-}
-
-bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) {
- return true;
-}
-
-bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
- return compliantWith(HalVersion::V1_2, model, noncompliantOperations);
-}
-
-static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
- return {.type = uncheckedConvertToV1_0(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) {
- return {.type = uncheckedConvertToV1_0(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
- return {.type = uncheckedConvertToV1_1(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) {
- return {.type = uncheckedConvertToV1_1(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
- return {.type = convertToV1_2(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
- return {.type = convertToV1_2(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) {
- return {.type = uncheckedConvertToV1_2(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) {
- return {.type = convertToV1_3(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) {
- return {.type = convertToV1_3(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
- return {.type = convertToV1_3(operation.type),
- .inputs = operation.inputs,
- .outputs = operation.outputs};
-}
-
-static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hardware::hidl_vec<V1_3::Operation>& operations) {
- hardware::hidl_vec<V1_0::Operation> result(operations.size());
- std::transform(
- operations.begin(), operations.end(), result.begin(),
- [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hardware::hidl_vec<V1_2::Operation>& operations) {
- hardware::hidl_vec<V1_0::Operation> result(operations.size());
- std::transform(
- operations.begin(), operations.end(), result.begin(),
- [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
- const hardware::hidl_vec<V1_3::Operation>& operations) {
- hardware::hidl_vec<V1_2::Operation> result(operations.size());
- std::transform(
- operations.begin(), operations.end(), result.begin(),
- [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
- const hardware::hidl_vec<V1_2::Operation>& operations) {
- hardware::hidl_vec<V1_1::Operation> result(operations.size());
- std::transform(
- operations.begin(), operations.end(), result.begin(),
- [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
- const hardware::hidl_vec<V1_3::Operation>& operations) {
- hardware::hidl_vec<V1_1::Operation> result(operations.size());
- std::transform(
- operations.begin(), operations.end(), result.begin(),
- [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
- const hardware::hidl_vec<V1_0::Operation>& operations) {
- hardware::hidl_vec<V1_2::Operation> result(operations.size());
- std::transform(operations.begin(), operations.end(), result.begin(),
- [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
- const hardware::hidl_vec<V1_1::Operation>& operations) {
- hardware::hidl_vec<V1_2::Operation> result(operations.size());
- std::transform(operations.begin(), operations.end(), result.begin(),
- [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
- const hardware::hidl_vec<V1_0::Operation>& operations) {
- hardware::hidl_vec<V1_3::Operation> result(operations.size());
- std::transform(operations.begin(), operations.end(), result.begin(),
- [](const V1_0::Operation& operation) { return convertToV1_3(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
- const hardware::hidl_vec<V1_1::Operation>& operations) {
- hardware::hidl_vec<V1_3::Operation> result(operations.size());
- std::transform(operations.begin(), operations.end(), result.begin(),
- [](const V1_1::Operation& operation) { return convertToV1_3(operation); });
- return result;
-}
-
-static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
- const hardware::hidl_vec<V1_2::Operation>& operations) {
- hardware::hidl_vec<V1_3::Operation> result(operations.size());
- std::transform(operations.begin(), operations.end(), result.begin(),
- [](const V1_2::Operation& operation) { return convertToV1_3(operation); });
- return result;
-}
-
-static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
- return validOperandType(static_cast<V1_0::OperandType>(operandType));
-}
-
-static bool compliantWithV1_0(const V1_3::OperandType& operandType) {
- return validOperandType(static_cast<V1_0::OperandType>(operandType));
-}
-
-static bool compliantWithV1_2(const V1_3::OperandType& operandType) {
- return validOperandType(static_cast<V1_2::OperandType>(operandType));
-}
-
-V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
- if (!compliantWithV1_0(operandType)) {
- LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
- << " from V1_2::OperandType to V1_0::OperandType";
- }
- return static_cast<V1_0::OperandType>(operandType);
-}
-
-V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
- return static_cast<V1_2::OperandType>(operandType);
-}
-
-V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
- if (!compliantWithV1_2(operandType)) {
- LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
- << " from V1_3::OperandType to V1_2::OperandType";
- }
- return static_cast<V1_2::OperandType>(operandType);
-}
-
-V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
- if (!compliantWithV1_0(operandType)) {
- LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
- << " from V1_3::Operand to V1_0::Operand";
- }
- return static_cast<V1_0::OperandType>(operandType);
-}
-
-bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) {
- return true;
-}
-
-bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) {
- return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
-}
-
-bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) {
- return true;
-}
-
-bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) {
- return true;
-}
-
-V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) {
- return lifetime;
-}
-
-V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) {
- if (!compliantWithV1_0(lifetime)) {
- LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime)
- << " from V1_3 to V1_0";
- }
- return static_cast<V1_0::OperandLifeTime>(lifetime);
-}
-
-V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) {
- return static_cast<V1_3::OperandLifeTime>(lifetime);
-}
-
-V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) {
- return lifetime;
-}
-
-V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
- return {.type = convertToV1_0(operand.type),
- .dimensions = operand.dimensions,
- .numberOfConsumers = operand.numberOfConsumers,
- .scale = operand.scale,
- .zeroPoint = operand.zeroPoint,
- .lifetime = convertToV1_0(operand.lifetime),
- .location = operand.location};
-}
-
-V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
- return {.type = convertToV1_0(operand.type),
- .dimensions = operand.dimensions,
- .numberOfConsumers = operand.numberOfConsumers,
- .scale = operand.scale,
- .zeroPoint = operand.zeroPoint,
- .lifetime = convertToV1_0(operand.lifetime),
- .location = operand.location};
-}
-
-V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
- return {.type = convertToV1_2(operand.type),
- .dimensions = operand.dimensions,
- .numberOfConsumers = operand.numberOfConsumers,
- .scale = operand.scale,
- .zeroPoint = operand.zeroPoint,
- .lifetime = operand.lifetime,
- .location = operand.location};
-}
-
-V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
- return {.type = convertToV1_2(operand.type),
- .dimensions = operand.dimensions,
- .numberOfConsumers = operand.numberOfConsumers,
- .scale = operand.scale,
- .zeroPoint = operand.zeroPoint,
- .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
- .location = operand.location,
- .extraParams = operand.extraParams};
-}
-
-V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
- return {.type = static_cast<V1_3::OperandType>(operand.type),
- .dimensions = operand.dimensions,
- .numberOfConsumers = operand.numberOfConsumers,
- .scale = operand.scale,
- .zeroPoint = operand.zeroPoint,
- .lifetime = convertToV1_3(operand.lifetime),
- .location = operand.location};
-}
-
-V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
- return {.type = static_cast<V1_3::OperandType>(operand.type),
- .dimensions = operand.dimensions,
- .numberOfConsumers = operand.numberOfConsumers,
- .scale = operand.scale,
- .zeroPoint = operand.zeroPoint,
- .lifetime = convertToV1_3(operand.lifetime),
- .location = operand.location,
- .extraParams = operand.extraParams};
-}
-
-V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
- return operand;
-}
-
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands) {
- return operands;
-}
-
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands) {
- hardware::hidl_vec<V1_0::Operand> result(operands.size());
- std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
- return result;
-}
-
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands) {
- hardware::hidl_vec<V1_0::Operand> result(operands.size());
- std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
- return result;
-}
-
-hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands) {
- hardware::hidl_vec<V1_2::Operand> result(operands.size());
- std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
- return result;
-}
-
-hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands) {
- return operands;
-}
-
-hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands) {
- hardware::hidl_vec<V1_2::Operand> result(operands.size());
- std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
- return result;
-}
-
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands) {
- hardware::hidl_vec<V1_3::Operand> result(operands.size());
- std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
- return result;
-}
-
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands) {
- hardware::hidl_vec<V1_3::Operand> result(operands.size());
- std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
- return result;
-}
-
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands) {
- return operands;
-}
-
-V1_0::Model convertToV1_0(const V1_0::Model& model) {
- return model;
-}
-
-V1_0::Model convertToV1_0(const V1_1::Model& model) {
- if (!compliantWithV1_0(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_1::Model to V1_0::Model";
- }
- return {.operands = model.operands,
- .operations = uncheckedConvertToV1_0(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools};
-}
-
-V1_0::Model convertToV1_0(const V1_2::Model& model) {
- if (!compliantWithV1_0(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_2::Model to V1_0::Model";
- }
- return {.operands = convertToV1_0(model.operands),
- .operations = uncheckedConvertToV1_0(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools};
-}
-
-V1_0::Model convertToV1_0(const V1_3::Model& model) {
- if (!compliantWithV1_0(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_3::Model to V1_0::Model";
- }
- return {.operands = convertToV1_0(model.main.operands),
- .operations = uncheckedConvertToV1_0(model.main.operations),
- .inputIndexes = model.main.inputIndexes,
- .outputIndexes = model.main.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools};
-}
-
-V1_1::Model convertToV1_1(const V1_0::Model& model) {
- return {.operands = model.operands,
- .operations = convertToV1_1(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = false};
-}
-
-V1_1::Model convertToV1_1(const V1_1::Model& model) {
- return model;
-}
-
-V1_1::Model convertToV1_1(const V1_2::Model& model) {
- if (!compliantWithV1_1(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_2::Model to V1_1::Model";
- }
- return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical.
- .operations = uncheckedConvertToV1_1(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
-}
-
-V1_1::Model convertToV1_1(const V1_3::Model& model) {
- if (!compliantWithV1_1(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_3::Model to V1_1::Model";
- }
- return {// Operands in 1.1 and 1.0 are identical.
- .operands = convertToV1_0(model.main.operands),
- .operations = uncheckedConvertToV1_1(model.main.operations),
- .inputIndexes = model.main.inputIndexes,
- .outputIndexes = model.main.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
-}
-
-V1_2::Model convertToV1_2(const V1_0::Model& model) {
- return {.operands = convertToV1_2(model.operands),
- .operations = convertToV1_2(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = false};
-}
-
-V1_2::Model convertToV1_2(const V1_1::Model& model) {
- return {.operands = convertToV1_2(model.operands),
- .operations = convertToV1_2(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
-}
-
-V1_2::Model convertToV1_2(const V1_2::Model& model) {
- return model;
-}
-
-V1_2::Model convertToV1_2(const V1_3::Model& model) {
- if (!compliantWithV1_2(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_3::Model to V1_2::Model";
- }
- return {.operands = convertToV1_2(model.main.operands),
- .operations = uncheckedConvertToV1_2(model.main.operations),
- .inputIndexes = model.main.inputIndexes,
- .outputIndexes = model.main.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = model.extensionNameToPrefix};
-}
-
-V1_3::Model convertToV1_3(const V1_0::Model& model) {
- return {.main = {.operands = convertToV1_3(model.operands),
- .operations = convertToV1_3(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes},
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = false};
-}
-
-V1_3::Model convertToV1_3(const V1_1::Model& model) {
- return {.main = {.operands = convertToV1_3(model.operands),
- .operations = convertToV1_3(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes},
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
-}
-
-V1_3::Model convertToV1_3(const V1_2::Model& model) {
- return {.main = {.operands = convertToV1_3(model.operands),
- .operations = convertToV1_3(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes},
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = model.extensionNameToPrefix};
-}
-
-V1_3::Model convertToV1_3(const V1_3::Model& model) {
- return model;
-}
-
-bool compliantWithV1_0(const V1_0::Request& request) {
- return true;
-}
-
-bool compliantWithV1_0(const V1_3::Request& request) {
- return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
- if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
- return false;
- }
- const auto& name = pool.hidlMemory().name();
- return name == "ashmem" || name == "mmap_fd";
- });
-}
-
-bool compliantWithV1_2(const V1_3::Request& request) {
- return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
- if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
- return false;
- }
- const auto& name = pool.hidlMemory().name();
- return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" ||
- name == "hardware_buffer";
- });
-}
-
-static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
- switch (pool.getDiscriminator()) {
- case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
- return pool.hidlMemory();
- case V1_3::Request::MemoryPool::hidl_discriminator::token:
- return hardware::hidl_memory{};
- }
-}
-
-static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) {
- V1_3::Request::MemoryPool ret;
- ret.hidlMemory(pool);
- return ret;
-}
-
-V1_0::Request convertToV1_0(const V1_0::Request& request) {
- return request;
-}
-
-static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
- hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size());
- std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
- [](const auto& pool) { return convertToV1_0(pool); });
- return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
-}
-
-V1_0::Request convertToV1_0(const V1_3::Request& request) {
- if (!compliantWithV1_0(request)) {
- LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
- << " from V1_3::Request to V1_0::Request of version 1.0";
- }
- return uncheckedConvertToV1_0(request);
-}
-
-V1_0::Request convertToV1_2(const V1_3::Request& request) {
- if (!compliantWithV1_2(request)) {
- LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
- << " from V1_3::Request to V1_0::Request of version 1.2";
- }
- return uncheckedConvertToV1_0(request);
-}
-
-V1_3::Request convertToV1_3(const V1_0::Request& request) {
- hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
- std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
- [](const auto& pool) { return convertToV1_3(pool); });
- return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
-}
-
-V1_3::Request convertToV1_3(const V1_3::Request& request) {
- return request;
-}
-
FenceState syncWait(int fd, int timeout) {
// This implementation is directly based on the ::sync_wait() implementation.
@@ -3273,293 +1873,5 @@ uint32_t getProp(const char* str, uint32_t defaultValue) {
}
#endif // NN_DEBUGGABLE
-ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) {
- return nnTryGetValue(convert(status));
-}
-
-ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) {
- return nnTryGetValue(convert(status));
-}
-
-OperandType uncheckedConvert(V1_3::OperandType operandType) {
- return nnTryGetValue(convert(operandType));
-}
-
-OperationType uncheckedConvert(V1_3::OperationType operandType) {
- return nnTryGetValue(convert(operandType));
-}
-
-Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) {
- return nnTryGetValue(convert(lifetime));
-}
-
-MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) {
- return nnTryGetValue(convert(measure));
-}
-
-DataLocation uncheckedConvert(const V1_0::DataLocation& location) {
- return nnTryGetValue(convert(location));
-}
-
-Operand uncheckedConvert(const V1_3::Operand& operand) {
- return nnTryGetValue(convert(operand));
-}
-
-Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) {
- return nnTryGetValue(convert(params));
-}
-
-Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) {
- return nnTryGetValue(convert(params));
-}
-
-Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) {
- return params;
-}
-
-Operation uncheckedConvert(const V1_3::Operation& operation) {
- return nnTryGetValue(convert(operation));
-}
-
-template <typename CanonicalType, typename HalType>
-static std::vector<CanonicalType> convertVec(const hardware::hidl_vec<HalType>& items) {
- std::vector<CanonicalType> result(items.size());
- std::transform(items.begin(), items.end(), result.begin(),
- [](const HalType& item) { return uncheckedConvert(item); });
- return result;
-}
-
-Model uncheckedConvert(const V1_3::Model& model) {
- return nnTryGetValue(convert(model));
-}
-
-Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) {
- return nnTryGetValue(convert(subgraph));
-}
-
-Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) {
- return nnTryGetValue(convert(x));
-}
-
-Request uncheckedConvert(const V1_3::Request& request) {
- return nnTryGetValue(convert(request));
-}
-
-Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) {
- return nnTryGetValue(convert(requestArgument));
-}
-
-Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) {
- return nnTryGetValue(convert(memoryPool));
-}
-
-OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) {
- return nnTryGetValue(convert(outputShape));
-}
-
-std::vector<OutputShape> uncheckedConvert(
- const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
- return convertVec<OutputShape>(outputShapes);
-}
-
-Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) {
- return nnTryGetValue(convert(capabilities));
-}
-
-Capabilities::OperandPerformance uncheckedConvert(
- const V1_3::Capabilities::OperandPerformance& operandPerformance) {
- return nnTryGetValue(convert(operandPerformance));
-}
-
-Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
- return nnTryGetValue(convert(performanceInfo));
-}
-
-Extension uncheckedConvert(const V1_2::Extension& extension) {
- return nnTryGetValue(convert(extension));
-}
-
-std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
- return convertVec<Extension>(extensions);
-}
-
-Extension::OperandTypeInformation uncheckedConvert(
- const V1_2::Extension::OperandTypeInformation& info) {
- return nnTryGetValue(convert(info));
-}
-
-OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
- return nnTryGetValue(convert(timeoutDuration));
-}
-
-Timing uncheckedConvert(const V1_2::Timing& timing) {
- return nnTryGetValue(convert(timing));
-}
-
-V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
- return static_cast<V1_0::ErrorStatus>(static_cast<int>(status));
-}
-
-V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
- return nnTryGetValue(V1_3::utils::convert(status));
-}
-
-V1_3::OperandType convertToV1_3(OperandType operandType) {
- return nnTryGetValue(V1_3::utils::convert(operandType));
-}
-
-V1_3::OperationType convertToV1_3(OperationType operandType) {
- return nnTryGetValue(V1_3::utils::convert(operandType));
-}
-
-V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
- return nnTryGetValue(V1_3::utils::convert(lifetime));
-}
-
-V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
- return nnTryGetValue(V1_1::utils::convert(preference));
-}
-
-V1_3::Priority convertToV1_3(Priority priority) {
- return nnTryGetValue(V1_3::utils::convert(priority));
-}
-
-V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
- return nnTryGetValue(V1_2::utils::convert(measure));
-}
-
-V1_0::DataLocation convertToV1_0(const DataLocation& location) {
- return nnTryGetValue(V1_0::utils::convert(location));
-}
-
-V1_3::Operand convertToV1_3(const Operand& operand) {
- return nnTryGetValue(V1_3::utils::convert(operand));
-}
-
-V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
- return nnTryGetValue(V1_2::utils::convert(params));
-}
-
-V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
- return nnTryGetValue(V1_2::utils::convert(params));
-}
-
-hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params) {
- return params;
-}
-
-V1_3::Operation convertToV1_3(const Operation& operation) {
- return nnTryGetValue(V1_3::utils::convert(operation));
-}
-
-template <typename HalType, typename CanonicalType>
-static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) {
- hardware::hidl_vec<HalType> result(items.size());
- std::transform(items.begin(), items.end(), result.begin(),
- [](const CanonicalType& item) { return convertToV1_0(item); });
- return result;
-}
-
-template <typename HalType, typename CanonicalType>
-static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) {
- hardware::hidl_vec<HalType> result(items.size());
- std::transform(items.begin(), items.end(), result.begin(),
- [](const CanonicalType& item) { return convertToV1_2(item); });
- return result;
-}
-
-template <typename HalType, typename CanonicalType>
-static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) {
- hardware::hidl_vec<HalType> result(items.size());
- std::transform(items.begin(), items.end(), result.begin(),
- [](const CanonicalType& item) { return convertToV1_3(item); });
- return result;
-}
-
-V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) {
- return nnTryGetValue(V1_2::utils::convert(outputShape));
-}
-
-hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) {
- return convertVecToV1_2<V1_2::OutputShape>(outputShapes);
-}
-
-V1_3::Model convertToV1_3(const Model& model) {
- return nnTryGetValue(V1_3::utils::convert(model));
-}
-
-V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) {
- return nnTryGetValue(V1_3::utils::convert(subgraph));
-}
-
-V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) {
- return nnTryGetValue(V1_2::utils::convert(x));
-}
-
-V1_3::Request convertToV1_3(const Request& request) {
- return nnTryGetValue(V1_3::utils::convert(request));
-}
-
-V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) {
- return nnTryGetValue(V1_0::utils::convert(requestArgument));
-}
-
-V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) {
- return nnTryGetValue(V1_3::utils::convert(memoryPool));
-}
-
-std::vector<Request::MemoryPool> uncheckedConvert(
- const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) {
- return convertVec<Request::MemoryPool>(memoryPools);
-}
-
-V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) {
- return nnTryGetValue(V1_3::utils::convert(timePoint));
-}
-
-V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) {
- return nnTryGetValue(V1_3::utils::convert(timeoutDuration));
-}
-
-V1_2::Timing convertToV1_2(const Timing& timing) {
- return nnTryGetValue(V1_2::utils::convert(timing));
-}
-
-V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) {
- return nnTryGetValue(V1_3::utils::convert(bufferRole));
-}
-
-hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) {
- return convertVecToV1_3<V1_3::BufferRole>(bufferRoles);
-}
-
-hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) {
- return nnTryGetValue(V1_0::utils::convert(operandValues));
-}
-
-hardware::hidl_memory convertToV1_0(const Memory& memory) {
- return nnTryGetValue(V1_0::utils::convert(memory));
-}
-
-Memory uncheckedConvert(const hardware::hidl_memory& memory) {
- return nnTryGetValue(convert(memory));
-}
-
-hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories) {
- return convertVecToV1_0<hardware::hidl_memory>(memories);
-}
-
-std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories) {
- return convertVec<Memory>(memories);
-}
-
-std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) {
- return convertVec<Model::Subgraph>(subgraphs);
-}
-
-std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) {
- return convertVec<Operand>(operands);
-}
-
} // namespace nn
} // namespace android
diff --git a/nn/common/SharedMemoryAndroid.cpp b/nn/common/SharedMemoryAndroid.cpp
index 9baca73c5..18881e04a 100644
--- a/nn/common/SharedMemoryAndroid.cpp
+++ b/nn/common/SharedMemoryAndroid.cpp
@@ -19,12 +19,12 @@
#include <android-base/scopeguard.h>
#include <android/hardware_buffer.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <cutils/native_handle.h>
#include <hidl/HidlSupport.h>
#include <hidlmemory/mapping.h>
#include <sys/mman.h>
#include <vndk/hardware_buffer.h>
+#include <algorithm>
#include <any>
#include <limits>
#include <memory>
@@ -66,28 +66,74 @@ GeneralResult<hidl_memory> allocateSharedMemory(size_t size) {
return maybeMemory;
}
-Memory createMemory(const hidl_memory& memory) {
- CHECK_LE(memory.size(), std::numeric_limits<uint32_t>::max());
+GeneralResult<hardware::hidl_handle> hidlHandleFromSharedHandle(const SharedHandle& handle) {
+ if (handle == nullptr) {
+ return {};
+ }
- auto* cloned = native_handle_clone(memory.handle());
- auto nativeHandle = ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
+ std::vector<base::unique_fd> fds;
+ fds.reserve(handle->fds.size());
+ for (const auto& fd : handle->fds) {
+ int dupFd = dup(fd);
+ if (dupFd == -1) {
+ return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+ }
+ fds.emplace_back(dupFd);
+ }
- return {
- .handle = std::move(nativeHandle),
+ native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+ if (nativeHandle == nullptr) {
+ return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
+ }
+ for (size_t i = 0; i < fds.size(); ++i) {
+ nativeHandle->data[i] = fds[i].release();
+ }
+ std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+ hardware::hidl_handle hidlHandle;
+ hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
+ return hidlHandle;
+}
+
+GeneralResult<SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+ if (handle == nullptr) {
+ return nullptr;
+ }
+
+ std::vector<base::unique_fd> fds;
+ fds.reserve(handle->numFds);
+ for (int i = 0; i < handle->numFds; ++i) {
+ int dupFd = dup(handle->data[i]);
+ if (dupFd == -1) {
+ return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+ }
+ fds.emplace_back(dupFd);
+ }
+
+ std::vector<int> ints(&handle->data[handle->numFds],
+ &handle->data[handle->numFds + handle->numInts]);
+
+ return std::make_shared<const Handle>(Handle{
+ .fds = std::move(fds),
+ .ints = std::move(ints),
+ });
+}
+
+GeneralResult<Memory> createMemory(const hidl_memory& memory) {
+ CHECK_LE(memory.size(), std::numeric_limits<uint32_t>::max());
+ return Memory{
+ .handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())),
.size = static_cast<uint32_t>(memory.size()),
.name = memory.name(),
};
}
-hidl_memory createHidlMemory(const Memory& memory) {
- const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
- // Copy memory to force the native_handle_t to be copied.
- auto copiedMemory = hidlMemory;
- return copiedMemory;
+GeneralResult<hidl_memory> createHidlMemory(const Memory& memory) {
+ return hidl_memory(memory.name, NN_TRY(hidlHandleFromSharedHandle(memory.handle)), memory.size);
}
GeneralResult<Mapping> mapAshmem(const Memory& memory) {
- const auto hidlMemory = createHidlMemory(memory);
+ const auto hidlMemory = NN_TRY(createHidlMemory(memory));
const auto mapping = mapMemory(hidlMemory);
if (mapping == nullptr) {
return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to map memory";
@@ -116,10 +162,10 @@ struct MmapFdMappingContext {
GeneralResult<Mapping> mapMemFd(const Memory& memory) {
const size_t size = memory.size;
- const native_handle_t* handle = memory.handle->handle();
- const int fd = handle->data[0];
- const int prot = handle->data[1];
- const size_t offset = getOffsetFromInts(handle->data[2], handle->data[3]);
+ const SharedHandle& handle = memory.handle;
+ const int fd = handle->fds[0];
+ const int prot = handle->ints[0];
+ const size_t offset = getOffsetFromInts(handle->ints[1], handle->ints[2]);
std::shared_ptr<base::MappedFile> mapping = base::MappedFile::FromFd(fd, offset, size, prot);
if (mapping == nullptr) {
@@ -132,7 +178,7 @@ GeneralResult<Mapping> mapMemFd(const Memory& memory) {
}
GeneralResult<Mapping> mapAhwbBlobMemory(const Memory& memory) {
- const auto* handle = memory.handle->handle();
+ const SharedHandle& handle = memory.handle;
const auto size = memory.size;
const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
@@ -152,7 +198,8 @@ GeneralResult<Mapping> mapAhwbBlobMemory(const Memory& memory) {
AHardwareBuffer* hardwareBuffer = nullptr;
status_t status = AHardwareBuffer_createFromHandle(
- &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer);
+ &desc, NN_TRY(hidlHandleFromSharedHandle(handle)),
+ AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer);
if (status != NO_ERROR) {
return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
<< "Can't create AHardwareBuffer from handle. Error: " << status;
@@ -196,31 +243,23 @@ GeneralResult<Memory> createSharedMemoryFromFd(size_t size, int prot, int fd, si
}
// Duplicate the file descriptor so the resultant Memory owns its own version.
- int dupfd = dup(fd);
- if (dupfd == -1) {
+ int dupFd = dup(fd);
+ if (dupFd == -1) {
// TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here?
return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Failed to dup the fd";
}
- // Create a temporary native handle to own the dupfd.
- native_handle_t* nativeHandle = native_handle_create(1, 3);
- if (nativeHandle == nullptr) {
- close(dupfd);
- // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here?
- return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
- }
+ std::vector<base::unique_fd> fds;
+ fds.emplace_back(dupFd);
const auto [lowOffsetBits, highOffsetBits] = getIntsFromOffset(offset);
- nativeHandle->data[0] = dupfd;
- nativeHandle->data[1] = prot;
- nativeHandle->data[2] = lowOffsetBits;
- nativeHandle->data[3] = highOffsetBits;
-
- // Create a NativeHandle which owns the native handle and fd so that we don't have to manually
- // clean either the native handle or the fd.
- auto ownedHandle = ::android::NativeHandle::create(nativeHandle, /*ownsHandle=*/true);
+ std::vector<int> ints = {prot, lowOffsetBits, highOffsetBits};
- return Memory{.handle = std::move(ownedHandle), .size = size, .name = "mmap_fd"};
+ SharedHandle handle = std::make_shared<const Handle>(Handle{
+ .fds = std::move(fds),
+ .ints = std::move(ints),
+ });
+ return Memory{.handle = std::move(handle), .size = size, .name = "mmap_fd"};
}
GeneralResult<Memory> createSharedMemoryFromHidlMemory(const hardware::hidl_memory& memory) {
@@ -232,19 +271,20 @@ GeneralResult<Memory> createSharedMemoryFromAHWB(const AHardwareBuffer& ahwb) {
AHardwareBuffer_describe(&ahwb, &bufferDesc);
const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
- auto* cloned = native_handle_clone(handle);
- auto nativeHandle = ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
-
if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
return Memory{
- .handle = std::move(nativeHandle),
+ .handle = NN_TRY(sharedHandleFromNativeHandle(handle)),
.size = bufferDesc.width,
.name = "hardware_buffer_blob",
};
}
// memory size is not used for non-BLOB AHWB memory.
- return Memory{.handle = std::move(nativeHandle), .size = 0, .name = "hardware_buffer"};
+ return Memory{
+ .handle = NN_TRY(sharedHandleFromNativeHandle(handle)),
+ .size = 0,
+ .name = "hardware_buffer",
+ };
}
GeneralResult<Mapping> map(const Memory& memory) {
diff --git a/nn/common/SharedMemoryHost.cpp b/nn/common/SharedMemoryHost.cpp
index 231977cf7..eeb49075e 100644
--- a/nn/common/SharedMemoryHost.cpp
+++ b/nn/common/SharedMemoryHost.cpp
@@ -23,6 +23,7 @@
#include <limits>
#include <memory>
#include <utility>
+#include <vector>
#include "Result.h"
#include "SharedMemory.h"
@@ -36,7 +37,7 @@ GeneralResult<Mapping> mapAshmem(const Memory& memory) {
CHECK_LE(memory.size, std::numeric_limits<uint32_t>::max());
const auto size = memory.size;
- int fd = memory.handle->handle()->data[0];
+ const int fd = memory.handle->fds[0];
std::shared_ptr<base::MappedFile> mapping =
base::MappedFile::FromFd(fd, /*offset=*/0, size, PROT_READ | PROT_WRITE);
if (mapping == nullptr) {
@@ -54,10 +55,10 @@ struct MmapFdMappingContext {
GeneralResult<Mapping> mapMemFd(const Memory& memory) {
const size_t size = memory.size;
- const native_handle_t* handle = memory.handle->handle();
- const int fd = handle->data[0];
- const int prot = handle->data[1];
- const size_t offset = getOffsetFromInts(handle->data[2], handle->data[3]);
+ const SharedHandle& handle = memory.handle;
+ const int fd = handle->fds[0];
+ const int prot = handle->ints[0];
+ const size_t offset = getOffsetFromInts(handle->ints[1], handle->ints[2]);
std::shared_ptr<base::MappedFile> mapping = base::MappedFile::FromFd(fd, offset, size, prot);
if (mapping == nullptr) {
@@ -78,18 +79,14 @@ GeneralResult<Memory> createSharedMemory(size_t size) {
<< "ashmem_create_region(" << size << ") fails with " << fd;
}
- native_handle_t* handle = native_handle_create(1, 0);
- if (handle == nullptr) {
- // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here?
- return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
- }
- handle->data[0] = fd;
-
- // Create a NativeHandle which owns the native handle and fd so that we don't have to manually
- // clean either the native handle or the fd.
- auto nativeHandle = ::android::NativeHandle::create(handle, /*ownsHandle=*/true);
+ std::vector<base::unique_fd> fds;
+ fds.emplace_back(fd);
- return Memory{.handle = std::move(nativeHandle), .size = size, .name = "ashmem"};
+ SharedHandle handle = std::make_shared<const Handle>(Handle{
+ .fds = std::move(fds),
+ .ints = {},
+ });
+ return Memory{.handle = std::move(handle), .size = size, .name = "ashmem"};
}
GeneralResult<Memory> createSharedMemoryFromFd(size_t size, int prot, int fd, size_t offset) {
@@ -98,31 +95,23 @@ GeneralResult<Memory> createSharedMemoryFromFd(size_t size, int prot, int fd, si
}
// Duplicate the file descriptor so the resultant Memory owns its own version.
- int dupfd = dup(fd);
- if (dupfd == -1) {
+ int dupFd = dup(fd);
+ if (dupFd == -1) {
// TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here?
return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Failed to dup the fd";
}
- // Create a temporary native handle to own the dupfd.
- native_handle_t* nativeHandle = native_handle_create(1, 3);
- if (nativeHandle == nullptr) {
- close(dupfd);
- // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here?
- return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
- }
+ std::vector<base::unique_fd> fds;
+ fds.emplace_back(dupFd);
const auto [lowOffsetBits, highOffsetBits] = getIntsFromOffset(offset);
- nativeHandle->data[0] = dupfd;
- nativeHandle->data[1] = prot;
- nativeHandle->data[2] = lowOffsetBits;
- nativeHandle->data[3] = highOffsetBits;
-
- // Create a NativeHandle which owns the native handle and fd so that we don't have to manually
- // clean either the native handle or the fd.
- auto ownedHandle = ::android::NativeHandle::create(nativeHandle, /*ownsHandle=*/true);
+ std::vector<int> ints = {prot, lowOffsetBits, highOffsetBits};
- return Memory{.handle = std::move(ownedHandle), .size = size, .name = "mmap_fd"};
+ SharedHandle handle = std::make_shared<const Handle>(Handle{
+ .fds = std::move(fds),
+ .ints = std::move(ints),
+ });
+ return Memory{.handle = std::move(handle), .size = size, .name = "mmap_fd"};
}
GeneralResult<Memory> createSharedMemoryFromHidlMemory(const hardware::hidl_memory& /*memory*/) {
diff --git a/nn/common/TypeUtils.cpp b/nn/common/TypeUtils.cpp
index 6d089bd2a..79d493e14 100644
--- a/nn/common/TypeUtils.cpp
+++ b/nn/common/TypeUtils.cpp
@@ -701,7 +701,7 @@ std::ostream& operator<<(std::ostream& os, const Operation& operation) {
<< ", .outputs=" << operation.outputs << "}";
}
-std::ostream& operator<<(std::ostream& os, const NativeHandle& handle) {
+std::ostream& operator<<(std::ostream& os, const SharedHandle& handle) {
return os << (handle != nullptr ? "<non-empty handle>" : "<empty handle>");
}
diff --git a/nn/common/Types.cpp b/nn/common/Types.cpp
index 17485f442..e2e4cf112 100644
--- a/nn/common/Types.cpp
+++ b/nn/common/Types.cpp
@@ -17,7 +17,6 @@
#include "Types.h"
#include <android-base/logging.h>
-#include <cutils/native_handle.h>
#include <errno.h>
#include <poll.h>
@@ -25,6 +24,7 @@
#include <cstddef>
#include <iterator>
#include <limits>
+#include <memory>
#include <optional>
#include <utility>
#include <vector>
@@ -128,24 +128,32 @@ SyncFence SyncFence::createAsSignaled() {
return SyncFence(nullptr);
}
-Result<SyncFence> SyncFence::create(NativeHandle syncFence) {
- const bool isValid = (syncFence != nullptr && syncFence->handle() != nullptr &&
- syncFence->handle()->numFds == 1 && syncFence->handle()->numInts == 0 &&
- &syncFence->handle()->data[0] != nullptr);
+SyncFence SyncFence::create(base::unique_fd fd) {
+ std::vector<base::unique_fd> fds;
+ fds.push_back(std::move(fd));
+ return SyncFence(std::make_shared<const Handle>(Handle{
+ .fds = std::move(fds),
+ .ints = {},
+ }));
+}
+
+Result<SyncFence> SyncFence::create(SharedHandle syncFence) {
+ const bool isValid =
+ (syncFence != nullptr && syncFence->fds.size() == 1 && syncFence->ints.empty());
if (!isValid) {
return NN_ERROR() << "Invalid sync fence handle passed to SyncFence::create";
}
return SyncFence(std::move(syncFence));
}
-SyncFence::SyncFence(NativeHandle syncFence) : mSyncFence(std::move(syncFence)) {}
+SyncFence::SyncFence(SharedHandle syncFence) : mSyncFence(std::move(syncFence)) {}
SyncFence::FenceState SyncFence::syncWait(OptionalTimeout optionalTimeout) const {
if (mSyncFence == nullptr) {
return FenceState::SIGNALED;
}
- const int fd = mSyncFence->handle()->data[0];
+ const int fd = mSyncFence->fds.front().get();
const int timeout = optionalTimeout.value_or(Timeout{-1}).count();
// This implementation is directly based on the ::sync_wait() implementation.
@@ -182,8 +190,16 @@ SyncFence::FenceState SyncFence::syncWait(OptionalTimeout optionalTimeout) const
return FenceState::UNKNOWN;
}
-NativeHandle SyncFence::getHandle() const {
+SharedHandle SyncFence::getSharedHandle() const {
return mSyncFence;
}
+bool SyncFence::hasFd() const {
+ return mSyncFence != nullptr;
+}
+
+int SyncFence::getFd() const {
+ return mSyncFence == nullptr ? -1 : mSyncFence->fds.front().get();
+}
+
} // namespace android::nn
diff --git a/nn/common/Validation.cpp b/nn/common/Validation.cpp
index d37c447c0..1c939ba40 100644
--- a/nn/common/Validation.cpp
+++ b/nn/common/Validation.cpp
@@ -679,13 +679,15 @@ Result<Version> validateOperations(const std::vector<Operation>& operations,
return version;
}
-Result<Version> validateNativeHandle(const NativeHandle& handle) {
+Result<Version> validateSharedHandle(const SharedHandle& handle) {
NN_VALIDATE(handle != nullptr);
+ NN_VALIDATE(std::all_of(handle->fds.begin(), handle->fds.end(),
+ [](const base::unique_fd& fd) { return fd.ok(); }));
return Version::ANDROID_OC_MR1;
}
Result<Version> validateMemory(const Memory& memory) {
- NN_TRY(validateNativeHandle(memory.handle));
+ NN_TRY(validateSharedHandle(memory.handle));
if (memory.name == "ashmem") {
NN_VALIDATE_NE(memory.size, 0u);
@@ -2571,8 +2573,8 @@ Result<Version> validate(const Extension& extension) {
return validateExtension(extension);
}
-Result<Version> validate(const NativeHandle& handle) {
- return validateNativeHandle(handle);
+Result<Version> validate(const SharedHandle& handle) {
+ return validateSharedHandle(handle);
}
Result<Version> validate(const Memory& memory) {
@@ -2611,8 +2613,8 @@ Result<Version> validate(const std::vector<Extension>& extensions) {
return validateExtensions(extensions);
}
-Result<Version> validate(const std::vector<NativeHandle>& handles) {
- return validateVector(handles, validateNativeHandle);
+Result<Version> validate(const std::vector<SharedHandle>& handles) {
+ return validateVector(handles, validateSharedHandle);
}
Result<Version> validate(const std::vector<BufferRole>& bufferRoles) {
diff --git a/nn/common/include/LegacyHalUtils.h b/nn/common/include/LegacyHalUtils.h
new file mode 100644
index 000000000..ffa4a8f6b
--- /dev/null
+++ b/nn/common/include/LegacyHalUtils.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// This file contains pre-canonical-types utility code and includes HAL
+// utilities. LegacyUtils.h is the subset of these utilities that do not touch
+// HAL.
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H
+
+#include <android-base/logging.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "HalInterfaces.h"
+#include "LegacyUtils.h"
+#include "NeuralNetworks.h"
+#include "ValidateHal.h"
+
+namespace android {
+namespace nn {
+
+// Make an optional Deadline from an OptionalTimePoint. If
+// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
+// time point holding the maximum Deadline. If the OptionalTimePoint is none,
+// this function returns std::nullopt.
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint);
+
+// Ensure that every user of FalseyErrorStream is linked to the
+// correct instance, using the correct LOG_TAG
+namespace {
+
+template <HalVersion version>
+struct VersionedType {};
+
+template <>
+struct VersionedType<HalVersion::V1_2> {
+ using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+ using OperandType = V1_2::OperandType;
+};
+
+template <>
+struct VersionedType<HalVersion::V1_3> {
+ using OperandPerformance = V1_3::Capabilities::OperandPerformance;
+ using OperandType = V1_3::OperandType;
+};
+
+template <HalVersion version>
+using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance;
+template <HalVersion version>
+using VersionedOperandType = typename VersionedType<version>::OperandType;
+
+} // namespace
+
+// Return a vector with one entry for each non-extension OperandType except
+// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be
+// sorted by OperandType.
+//
+// Control flow (OperandType::SUBGRAPH) operation performance is specified
+// separately using Capabilities::ifPerformance and
+// Capabilities::whilePerformance.
+template <HalVersion version>
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ V1_0::PerformanceInfo perf);
+
+// Update the vector entry corresponding to the specified OperandType with the
+// specified PerformanceInfo value. The vector must already have an entry for
+// that OperandType, and must be sorted by OperandType.
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, V1_0::PerformanceInfo perf);
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, V1_0::PerformanceInfo perf);
+
+// Look for a vector entry corresponding to the specified OperandType. If
+// found, return the associated PerformanceInfo. If not, return a pessimistic
+// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type);
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type);
+
+// Returns true if an operand type is an extension type.
+bool isExtensionOperandType(V1_3::OperandType type);
+
+// Returns true if an operation type is an extension type.
+bool isExtensionOperationType(V1_3::OperationType type);
+
+// Returns the amount of space needed to store a value of the specified
+// dimensions and type. For a tensor with unspecified rank or at least one
+// unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type.
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a tensor with unspecified rank or at least one
+// unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type.
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(const Operand&).
+inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) {
+ return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
+}
+
+// Returns true if the amount of space needed to store a value of the specified
+// dimensions and element size overflows the uint32_t type.
+//
+// Aborts if the specified type is an extension type.
+//
+// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions);
+
+// Returns the name of the operation type in ASCII.
+std::string getOperationName(V1_3::OperationType opCode);
+
+// Returns the name of the operand type in ASCII.
+std::string getOperandTypeName(V1_3::OperandType type);
+
+// Whether an operand of tensor type has unspecified dimensions.
+//
+// Undefined behavior if the operand type is a scalar type.
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand);
+
+// Does a detailed LOG(INFO) of the model
+void logModelToInfo(const V1_0::Model& model);
+void logModelToInfo(const V1_1::Model& model);
+void logModelToInfo(const V1_2::Model& model);
+void logModelToInfo(const V1_3::Model& model);
+
+bool validateOperandSymmPerChannelQuantParams(
+ const V1_3::Operand& halOperand,
+ const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
+
+// Convert ANEURALNETWORKS_* result code to ErrorStatus.
+// Not guaranteed to be a 1-to-1 mapping.
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode);
+
+// Convert ErrorStatus to ANEURALNETWORKS_* result code.
+// Not guaranteed to be a 1-to-1 mapping.
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
+
+// Convert execution results to runtime format. Additionally checks that the
+// returned results abide by the HAL specification, and logs an error if the
+// result violates the specification.
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing);
+
+// Versioning
+
+bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_3::Capabilities& capabilities);
+
+// If noncompliantOperations != nullptr, then
+// precondition: noncompliantOperations->empty()
+// postcondition: *noncompliantOperations consists of the indices of the noncompliant
+// operations; if the compliance check fails for some reason
+// other than a noncompliant operation,
+// *noncompliantOperations consists of the indices of all operations
+bool compliantWithV1_0(const V1_0::Model& model);
+bool compliantWithV1_0(const V1_1::Model& model);
+bool compliantWithV1_0(const V1_2::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_0(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_1(const V1_0::Model& model);
+bool compliantWithV1_1(const V1_1::Model& model);
+bool compliantWithV1_1(const V1_2::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_1(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_2(const V1_0::Model& model);
+bool compliantWithV1_2(const V1_1::Model& model);
+bool compliantWithV1_2(const V1_2::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_2(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status);
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status);
+
+V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities);
+
+V1_0::Model convertToV1_0(const V1_0::Model& model);
+V1_0::Model convertToV1_0(const V1_1::Model& model);
+V1_0::Model convertToV1_0(const V1_2::Model& model);
+V1_0::Model convertToV1_0(const V1_3::Model& model);
+V1_1::Model convertToV1_1(const V1_0::Model& model);
+V1_1::Model convertToV1_1(const V1_1::Model& model);
+V1_1::Model convertToV1_1(const V1_2::Model& model);
+V1_1::Model convertToV1_1(const V1_3::Model& model);
+V1_2::Model convertToV1_2(const V1_0::Model& model);
+V1_2::Model convertToV1_2(const V1_1::Model& model);
+V1_2::Model convertToV1_2(const V1_2::Model& model);
+V1_2::Model convertToV1_2(const V1_3::Model& model);
+V1_3::Model convertToV1_3(const V1_0::Model& model);
+V1_3::Model convertToV1_3(const V1_1::Model& model);
+V1_3::Model convertToV1_3(const V1_2::Model& model);
+V1_3::Model convertToV1_3(const V1_3::Model& model);
+
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type);
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type);
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type);
+
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand);
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands);
+
+bool compliantWithV1_0(const V1_0::Request& request);
+bool compliantWithV1_0(const V1_3::Request& request);
+bool compliantWithV1_2(const V1_3::Request& request);
+
+V1_0::Request convertToV1_0(const V1_0::Request& request);
+V1_0::Request convertToV1_0(const V1_3::Request& request);
+V1_0::Request convertToV1_2(const V1_3::Request& request);
+V1_3::Request convertToV1_3(const V1_0::Request& request);
+V1_3::Request convertToV1_3(const V1_3::Request& request);
+
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime);
+
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime);
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime);
+
+constexpr V1_3::Priority convertToHalPriority(int32_t priority) {
+ switch (priority) {
+ case ANEURALNETWORKS_PRIORITY_LOW:
+ return V1_3::Priority::LOW;
+ case ANEURALNETWORKS_PRIORITY_MEDIUM:
+ return V1_3::Priority::MEDIUM;
+ case ANEURALNETWORKS_PRIORITY_HIGH:
+ return V1_3::Priority::HIGH;
+ }
+ LOG(FATAL) << "unrecognized priority: " << priority;
+ return {};
+}
+
+// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h.
+Capabilities::OperandPerformance uncheckedConvert(
+ const V1_3::Capabilities::OperandPerformance& operandPerformance);
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo);
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities);
+DataLocation uncheckedConvert(const V1_0::DataLocation& location);
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status);
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status);
+Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&);
+Extension uncheckedConvert(const V1_2::Extension& extension);
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params);
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure);
+Memory uncheckedConvert(const hardware::hidl_memory& memory);
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&);
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph);
+Model uncheckedConvert(const V1_3::Model& model);
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params);
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params);
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime);
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params);
+OperandType uncheckedConvert(V1_3::OperandType operandType);
+Operand uncheckedConvert(const V1_3::Operand& operand);
+OperationType uncheckedConvert(V1_3::OperationType operationType);
+Operation uncheckedConvert(const V1_3::Operation& operation);
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration);
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape);
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument);
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool);
+Request uncheckedConvert(const V1_3::Request& request);
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions);
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories);
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs);
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands);
+std::vector<OutputShape> uncheckedConvert(
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes);
+std::vector<Request::MemoryPool> uncheckedConvert(
+ const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools);
+Timing uncheckedConvert(const V1_2::Timing& timing);
+
+// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h.
+hardware::hidl_memory convertToV1_0(const Memory& memory);
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories);
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues);
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes);
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles);
+V1_0::DataLocation convertToV1_0(const DataLocation& location);
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status);
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument);
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference);
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure);
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&);
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params);
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape);
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params);
+V1_2::Timing convertToV1_2(const Timing& timing);
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole);
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status);
+V1_3::Model convertToV1_3(const Model& model);
+V1_3::Operand convertToV1_3(const Operand& operand);
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime);
+V1_3::OperandType convertToV1_3(OperandType operandType);
+V1_3::Operation convertToV1_3(const Operation& operation);
+V1_3::OperationType convertToV1_3(OperationType operationType);
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration);
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint);
+V1_3::Priority convertToV1_3(Priority priority);
+V1_3::Request convertToV1_3(const Request& request);
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool);
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H
diff --git a/nn/common/include/LegacyUtils.h b/nn/common/include/LegacyUtils.h
new file mode 100644
index 000000000..64ee835f8
--- /dev/null
+++ b/nn/common/include/LegacyUtils.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// This file contains pre-canonical-types utility code and does not include HAL
+// utilities. LegacyHalUtils.h is a superset of these utilities that includes
+// HAL utilities.
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
+
+#include <android-base/logging.h>
+
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include "NeuralNetworks.h"
+#include "OperationResolver.h"
+
+namespace android {
+namespace nn {
+
+// The number of data types (OperandCode) defined in NeuralNetworks.h.
+const int kNumberOfDataTypes = 16;
+
+// The number of operation types (OperationCode) defined in NeuralNetworks.h.
+const int kNumberOfOperationTypes = 102;
+static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes);
+
+// The number of execution preferences defined in NeuralNetworks.h.
+const int kNumberOfPreferences = 3;
+
+// The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
+const int kNumberOfDataTypesOEM = 2;
+
+// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
+const int kNumberOfOperationTypesOEM = 1;
+
+// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
+const int kOEMCodeBase = 10000;
+
+/* IMPORTANT: if you change the following list, don't
+ * forget to update the corresponding 'tags' table in
+ * the initVlogMask() function implemented in Utils.cpp.
+ */
+enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY };
+
+#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0)
+
+#define VLOG(TAG) \
+ if (LIKELY(!VLOG_IS_ON(TAG))) \
+ ; \
+ else \
+ LOG(INFO)
+
+extern int vLogMask;
+void initVLogMask();
+
+#ifdef NN_DEBUGGABLE
+#define SHOW_IF_DEBUG(msg) msg
+#else
+#define SHOW_IF_DEBUG(msg) ""
+#endif
+
+// DEPRECATED(b/118737105). Use CHECK.
+#define nnAssert(v) CHECK(v)
+
+#define NN_RETURN_IF_ERROR(expr) \
+ do { \
+ int _errorCode = (expr); \
+ if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
+ return _errorCode; \
+ } \
+ } while (0)
+
+// Make a TimeoutDuration from a duration in nanoseconds. If the value exceeds
+// the max duration, return the maximum expressible duration.
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds);
+
+// Type to represent a deadline time point across processes.
+using Deadline = std::chrono::steady_clock::time_point;
+
+// Make a Deadline from a duration. If the sum of the current time and the
+// duration exceeds the max time, return a time point holding the maximum
+// expressible time.
+Deadline makeDeadline(TimeoutDuration duration);
+inline Deadline makeDeadline(uint64_t duration) {
+ return makeDeadline(makeTimeoutDuration(duration));
+}
+
+// Convenience function. If the duration is provided, this function creates a
+// Deadline using makeDeadline. If the duration is not provided, this function
+// returns std::nullopt.
+inline std::optional<Deadline> makeDeadline(OptionalTimeoutDuration duration) {
+ return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
+ return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+
+// Returns true if the deadline has passed. Returns false if either the deadline
+// has not been exceeded or if the deadline is not present.
+bool hasDeadlinePassed(const std::optional<Deadline>& deadline);
+
+// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
+// provided, this function returns none for OptionalTimePoint.
+OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
+
+// Returns true if an operand type is an extension type.
+bool isExtensionOperandType(OperandType type);
+
+// Returns true if an operation type is an extension type.
+bool isExtensionOperationType(OperationType type);
+
+// Returns the amount of space needed to store a value of the specified
+// dimensions and type. For a tensor with unspecified rank or at least one
+// unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type.
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
+uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a tensor with unspecified rank or at least one
+// unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type.
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(const Operand&).
+inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
+ return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
+}
+
+// Returns the amount of space needed to store a value of the specified
+// dimensions and element size. For a tensor with unspecified rank or at least
+// one unspecified dimension, returns zero.
+//
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(const Operand&).
+uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);
+
+// Returns true if the amount of space needed to store a value of the specified
+// dimensions and element size overflows the uint32_t type.
+//
+// Aborts if the specified type is an extension type.
+//
+// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
+ const std::vector<uint32_t>& dimensions);
+
+// Returns true if the amount of space needed to store a value of the specified
+// dimensions and element size overflows the uint32_t type.
+//
+// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
+bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);
+
+// Returns true if a non-extension operand type is a scalar type.
+//
+// Aborts if the specified type is an extension type.
+//
+// See also TypeManager::isTensorType(OperandType).
+bool nonExtensionOperandTypeIsScalar(int type);
+
+// Whether an operand of tensor type has unspecified dimensions.
+//
+// Undefined behavior if the operand type is a scalar type.
+bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
+bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
+bool tensorHasUnspecifiedDimensions(const Operand& operand);
+bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
+
+// Returns the number of padding bytes needed to align data of the
+// specified length. It aligns object of length:
+// 2, 3 on a 2 byte boundary,
+// 4+ on a 4 byte boundary.
+// We may want to have different alignments for tensors.
+// TODO: This is arbitrary, more a proof of concept. We need
+// to determine what this should be.
+uint32_t alignBytesNeeded(uint32_t index, size_t length);
+
+// Does a detailed LOG(INFO) of the model
+void logModelToInfo(const Model& model);
+
+inline std::string toString(uint32_t obj) {
+ return std::to_string(obj);
+}
+
+template <typename Type>
+std::string toString(const std::vector<Type>& range) {
+ std::string os = "[";
+ for (size_t i = 0; i < range.size(); ++i) {
+ os += (i == 0 ? "" : ", ") + toString(range[i]);
+ }
+ return os += "]";
+}
+
+template <typename A, typename B>
+std::string toString(const std::pair<A, B>& pair) {
+ std::ostringstream oss;
+ oss << "(" << pair.first << ", " << pair.second << ")";
+ return oss.str();
+}
+
+inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
+ return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
+}
+
+// Validates an operand type.
+//
+// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
+//
+// If allowPartial is true, the dimensions may be underspecified.
+int validateOperandType(const ANeuralNetworksOperandType& type,
+ const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
+ const char* tag, bool allowPartial);
+int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
+ const char* tag);
+
+// A set of functions to help validate models containing IF or WHILE operations.
+struct SubgraphValidationHelper {
+ // Checks if a given operand is a SUBGRAPH operand with a valid offset.
+ std::function<bool(const Operand&)> isValidSubgraphReference;
+ // Gets the input count of a subgraph referenced by a given operand.
+ std::function<uint32_t(const Operand&)> getSubgraphInputCount;
+ // Gets the output count of a subgraph referenced by a given operand.
+ std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
+ // Gets the specified input operand of a subgraph referenced by a given operand.
+ std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
+ // Gets the specified output operand of a subgraph referenced by a given operand.
+ std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
+ // Whether control flow operations with inner or outer input or output
+ // operands of unknown size are allowed.
+ bool allowControlFlowOperationWithOperandOfUnknownSize;
+};
+
+// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
+// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
+// The last argument is only used for validating IF and WHILE operations.
+int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
+ const uint32_t* inputIndexes, uint32_t outputCount,
+ const uint32_t* outputIndexes, const std::vector<Operand>& operands,
+ HalVersion halVersion, const SubgraphValidationHelper& helper);
+
+inline size_t getSizeFromInts(int lower, int higher) {
+ return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
+}
+
+// Convert ANEURALNETWORKS_* result code to ErrorStatus.
+// Not guaranteed to be a 1-to-1 mapping.
+ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+
+// Convert ErrorStatus to ANEURALNETWORKS_* result code.
+// Not guaranteed to be a 1-to-1 mapping.
+int convertErrorStatusToResultCode(ErrorStatus status);
+
+// Convert execution results to runtime format. Additionally checks that the
+// returned results abide by the HAL specification, and logs an error if the
+// result violates the specification.
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
+
+constexpr Priority convertToCanonicalPriority(int32_t priority) {
+ switch (priority) {
+ case ANEURALNETWORKS_PRIORITY_LOW:
+ return Priority::LOW;
+ case ANEURALNETWORKS_PRIORITY_MEDIUM:
+ return Priority::MEDIUM;
+ case ANEURALNETWORKS_PRIORITY_HIGH:
+ return Priority::HIGH;
+ }
+ LOG(FATAL) << "unrecognized priority: " << priority;
+ return {};
+}
+
+// The function syncWait() has the same semantics as the system function
+// ::sync_wait(), except that the syncWait() return value is semantically
+// richer. The timeout parameter is in msecs.
+enum class FenceState {
+ ACTIVE, // fence has not been signaled
+ SIGNALED, // fence has been signaled
+ ERROR, // fence has been placed in the error state
+ UNKNOWN, // either bad argument passed to syncWait(), or internal error
+};
+FenceState syncWait(int fd, int timeout);
+
+#ifdef NN_DEBUGGABLE
+uint32_t getProp(const char* str, uint32_t defaultValue = 0);
+#endif // NN_DEBUGGABLE
+
+} // namespace nn
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index cdaf91172..7ac1e59c7 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -17,595 +17,7 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
-#include <android-base/logging.h>
-
-#include <set>
-#include <string>
-#include <tuple>
-#include <utility>
-#include <vector>
-
-#include "HalInterfaces.h"
-#include "NeuralNetworks.h"
-#include "OperationResolver.h"
-#include "ValidateHal.h"
-#include "nnapi/TypeUtils.h"
-#include "nnapi/Types.h"
-
-namespace android {
-namespace nn {
-
-// The number of data types (OperandCode) defined in NeuralNetworks.h.
-const int kNumberOfDataTypes = 16;
-
-// The number of operation types (OperationCode) defined in NeuralNetworks.h.
-const int kNumberOfOperationTypes = 102;
-static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes);
-
-// The number of execution preferences defined in NeuralNetworks.h.
-const int kNumberOfPreferences = 3;
-
-// The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
-const int kNumberOfDataTypesOEM = 2;
-
-// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
-const int kNumberOfOperationTypesOEM = 1;
-
-// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
-const int kOEMCodeBase = 10000;
-
-/* IMPORTANT: if you change the following list, don't
- * forget to update the corresponding 'tags' table in
- * the initVlogMask() function implemented in Utils.cpp.
- */
-enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY };
-
-#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0)
-
-#define VLOG(TAG) \
- if (LIKELY(!VLOG_IS_ON(TAG))) \
- ; \
- else \
- LOG(INFO)
-
-extern int vLogMask;
-void initVLogMask();
-
-#ifdef NN_DEBUGGABLE
-#define SHOW_IF_DEBUG(msg) msg
-#else
-#define SHOW_IF_DEBUG(msg) ""
-#endif
-
-// DEPRECATED(b/118737105). Use CHECK.
-#define nnAssert(v) CHECK(v)
-
-#define NN_RETURN_IF_ERROR(expr) \
- do { \
- int _errorCode = (expr); \
- if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
- return _errorCode; \
- } \
- } while (0)
-
-// Make an TimeoutDuration from a duration in nanoseconds. If the value exceeds
-// the max duration, return the maximum expressible duration.
-TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds);
-
-// Type to represent a deadline time point across processes.
-using Deadline = std::chrono::steady_clock::time_point;
-
-// Make an Deadline from a duration. If the sum of the current time and the
-// duration exceeds the max time, return a time point holding the maximum
-// expressible time.
-Deadline makeDeadline(TimeoutDuration duration);
-inline Deadline makeDeadline(uint64_t duration) {
- return makeDeadline(makeTimeoutDuration(duration));
-}
-
-// Convenience function. If the duration is provided, this function creates a
-// Deadline using makeDeadline. If the duration is not provided, this function
-// returns std::nullopt.
-inline std::optional<Deadline> makeDeadline(OptionalTimeoutDuration duration) {
- return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
-}
-inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
- return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
-}
-
-// Make an optional Deadline from an OptionalTimePoint. If
-// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
-// time point holding the maximum Deadline. If the OptionalTimePoint is none,
-// this function returns std::nullopt.
-std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint);
-
-// Returns true if the deadline has passed. Returns false if either the deadline
-// has not been exceeded or if the deadline is not present.
-bool hasDeadlinePassed(const std::optional<Deadline>& deadline);
-
-// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
-// provided, this function returns none for OptionalTimePoint.
-OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
-
-// Ensure that every user of FalseyErrorStream is linked to the
-// correct instance, using the correct LOG_TAG
-namespace {
-
-template <HalVersion version>
-struct VersionedType {};
-
-template <>
-struct VersionedType<HalVersion::V1_2> {
- using OperandPerformance = V1_2::Capabilities::OperandPerformance;
- using OperandType = V1_2::OperandType;
-};
-
-template <>
-struct VersionedType<HalVersion::V1_3> {
- using OperandPerformance = V1_3::Capabilities::OperandPerformance;
- using OperandType = V1_3::OperandType;
-};
-
-template <HalVersion version>
-using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance;
-template <HalVersion version>
-using VersionedOperandType = typename VersionedType<version>::OperandType;
-
-} // namespace
-
-// Return a vector with one entry for each non-extension OperandType except
-// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be
-// sorted by OperandType.
-//
-// Control flow (OperandType::SUBGRAPH) operation performance is specified
-// separately using Capabilities::ifPerformance and
-// Capabilities::whilePerformance.
-template <HalVersion version>
-hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
- V1_0::PerformanceInfo perf);
-
-// Update the vector entry corresponding to the specified OperandType with the
-// specified PerformanceInfo value. The vector must already have an entry for
-// that OperandType, and must be sorted by OperandType.
-void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
- V1_2::OperandType type, V1_0::PerformanceInfo perf);
-void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
- V1_3::OperandType type, V1_0::PerformanceInfo perf);
-
-// Look for a vector entry corresponding to the specified OperandType. If
-// found, return the associated PerformanceInfo. If not, return a pessimistic
-// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
-V1_0::PerformanceInfo lookup(
- const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
- V1_2::OperandType type);
-V1_0::PerformanceInfo lookup(
- const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
- V1_3::OperandType type);
-
-// Returns true if an operand type is an extension type.
-bool isExtensionOperandType(V1_3::OperandType type);
-
-// Returns true if an operation type is an extension type.
-bool isExtensionOperationType(V1_3::OperationType type);
-
-// Returns the amount of space needed to store a value of the specified
-// dimensions and type. For a tensor with unspecified rank or at least one
-// unspecified dimension, returns zero.
-//
-// Aborts if the specified type is an extension type.
-// Aborts if the size would overflow the return type.
-//
-// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
-uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
- const std::vector<uint32_t>& dimensions);
-uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
-
-// Returns the amount of space needed to store a value of the dimensions and
-// type of this operand. For a tensor with unspecified rank or at least one
-// unspecified dimension, returns zero.
-//
-// Aborts if the specified type is an extension type.
-// Aborts if the size would overflow the return type.
-//
-// See also TypeManager::getSizeOfData(const Operand&).
-inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
- return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
-}
-inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) {
- return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
-}
-
-// Returns the amount of space needed to store a value of the specified
-// dimensions and element size. For a tensor with unspecified rank or at least
-// one unspecified dimension, returns zero.
-//
-// Aborts if the size would overflow the return type.
-//
-// See also TypeManager::getSizeOfData(const Operand&).
-uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);
-
-// Returns true if the amount of space needed to store a value of the specified
-// dimensions and element size overflows the uint32_t type.
-//
-// Aborts if the specified type is an extension type.
-//
-// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
- const std::vector<uint32_t>& dimensions);
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
- const std::vector<uint32_t>& dimensions);
-
-// Returns true if the amount of space needed to store a value of the specified
-// dimensions and element size overflows the uint32_t type.
-//
-// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
-bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);
-
-// Returns true if a non-extension operand type is a scalar type.
-//
-// Aborts if the specified type is an extension type.
-//
-// See also TypeManager::isTensorType(OperandType).
-bool nonExtensionOperandTypeIsScalar(int type);
-
-// Returns the name of the operation type in ASCII.
-std::string getOperationName(V1_3::OperationType opCode);
-
-// Returns the name of the operand type in ASCII.
-std::string getOperandTypeName(V1_3::OperandType type);
-
-// Whether an operand of tensor type has unspecified dimensions.
-//
-// Undefined behavior if the operand type is a scalar type.
-bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
-bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
- const std::vector<uint32_t>& dimensions);
-bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions);
-bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
-bool tensorHasUnspecifiedDimensions(const Operand& operand);
-bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand);
-bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
-
-// Returns the number of padding bytes needed to align data of the
-// specified length. It aligns object of length:
-// 2, 3 on a 2 byte boundary,
-// 4+ on a 4 byte boundary.
-// We may want to have different alignments for tensors.
-// TODO: This is arbitrary, more a proof of concept. We need
-// to determine what this should be.
-uint32_t alignBytesNeeded(uint32_t index, size_t length);
-
-// Does a detailed LOG(INFO) of the model
-void logModelToInfo(const V1_0::Model& model);
-void logModelToInfo(const V1_1::Model& model);
-void logModelToInfo(const V1_2::Model& model);
-void logModelToInfo(const V1_3::Model& model);
-void logModelToInfo(const Model& model);
-
-inline std::string toString(uint32_t obj) {
- return std::to_string(obj);
-}
-
-template <typename Type>
-std::string toString(const std::vector<Type>& range) {
- std::string os = "[";
- for (size_t i = 0; i < range.size(); ++i) {
- os += (i == 0 ? "" : ", ") + toString(range[i]);
- }
- return os += "]";
-}
-
-template <typename A, typename B>
-std::string toString(const std::pair<A, B>& pair) {
- std::ostringstream oss;
- oss << "(" << pair.first << ", " << pair.second << ")";
- return oss.str();
-}
-
-inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
- return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
-}
-
-bool validateOperandSymmPerChannelQuantParams(
- const V1_3::Operand& halOperand,
- const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
-
-// Validates an operand type.
-//
-// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
-//
-// If allowPartial is true, the dimensions may be underspecified.
-int validateOperandType(const ANeuralNetworksOperandType& type,
- const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
- const char* tag, bool allowPartial);
-int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
- const char* tag);
-
-// A set of functions to help validate models containing IF or WHILE operations.
-struct SubgraphValidationHelper {
- // Checks if a given operand is a SUBGRAPH operand with a valid offset.
- std::function<bool(const Operand&)> isValidSubgraphReference;
- // Gets the input count of a subgraph referenced by a given operand.
- std::function<uint32_t(const Operand&)> getSubgraphInputCount;
- // Gets the output count of a subgraph referenced by a given operand.
- std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
- // Gets the specified input operand of a subgraph referenced by a given operand.
- std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
- // Gets the specified output operand of a subgraph referenced by a given operand.
- std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
- // Whether control flow operations with inner or outer input or output
- // operands of unknown size are allowed.
- bool allowControlFlowOperationWithOperandOfUnknownSize;
-};
-
-// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
-// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
-// The last argument is only used for validating IF and WHILE operations.
-int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
- const uint32_t* inputIndexes, uint32_t outputCount,
- const uint32_t* outputIndexes, const std::vector<Operand>& operands,
- HalVersion halVersion, const SubgraphValidationHelper& helper);
-
-inline size_t getSizeFromInts(int lower, int higher) {
- return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
-}
-
-// Convert ANEURALNETWORKS_* result code to ErrorStatus.
-// Not guaranteed to be a 1-to-1 mapping.
-ErrorStatus convertResultCodeToErrorStatus(int resultCode);
-V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode);
-
-// Convert ErrorStatus to ANEURALNETWORKS_* result code.
-// Not guaranteed to be a 1-to-1 mapping.
-int convertErrorStatusToResultCode(ErrorStatus status);
-int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
-
-// Convert execution results to runtime format. Additionally checks that the
-// returned results abide by the HAL specification, and logs an error if the
-// result violates the specification.
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
- V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
- const V1_2::Timing& timing);
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
- ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
-
-// Versioning
-
-bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
-bool compliantWithV1_0(const V1_3::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
-bool compliantWithV1_1(const V1_3::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
-bool compliantWithV1_2(const V1_3::Capabilities& capabilities);
-bool compliantWithV1_3(const V1_0::Capabilities& capabilities);
-bool compliantWithV1_3(const V1_1::Capabilities& capabilities);
-bool compliantWithV1_3(const V1_2::Capabilities& capabilities);
-bool compliantWithV1_3(const V1_3::Capabilities& capabilities);
-
-// If noncompliantOperations != nullptr, then
-// precondition: noncompliantOperations->empty()
-// postcondition: *noncompliantOperations consists of the indices of the noncompliant
-// operations; if the compliance check fails for some reason
-// other than a noncompliant operation,
-// *noncompliantOperations consists of the indices of all operations
-bool compliantWithV1_0(const V1_0::Model& model);
-bool compliantWithV1_0(const V1_1::Model& model);
-bool compliantWithV1_0(const V1_2::Model& model,
- std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_0(const V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const V1_0::Model& model);
-bool compliantWithV1_1(const V1_1::Model& model);
-bool compliantWithV1_1(const V1_2::Model& model,
- std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_2(const V1_0::Model& model);
-bool compliantWithV1_2(const V1_1::Model& model);
-bool compliantWithV1_2(const V1_2::Model& model,
- std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_2(const V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations = nullptr);
-
-V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status);
-V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status);
-V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status);
-V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status);
-
-V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
-V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
-V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
-V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
-V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
-V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities);
-V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities);
-V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities);
-V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities);
-V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities);
-
-V1_0::Model convertToV1_0(const V1_0::Model& model);
-V1_0::Model convertToV1_0(const V1_1::Model& model);
-V1_0::Model convertToV1_0(const V1_2::Model& model);
-V1_0::Model convertToV1_0(const V1_3::Model& model);
-V1_1::Model convertToV1_1(const V1_0::Model& model);
-V1_1::Model convertToV1_1(const V1_1::Model& model);
-V1_1::Model convertToV1_1(const V1_2::Model& model);
-V1_1::Model convertToV1_1(const V1_3::Model& model);
-V1_2::Model convertToV1_2(const V1_0::Model& model);
-V1_2::Model convertToV1_2(const V1_1::Model& model);
-V1_2::Model convertToV1_2(const V1_2::Model& model);
-V1_2::Model convertToV1_2(const V1_3::Model& model);
-V1_3::Model convertToV1_3(const V1_0::Model& model);
-V1_3::Model convertToV1_3(const V1_1::Model& model);
-V1_3::Model convertToV1_3(const V1_2::Model& model);
-V1_3::Model convertToV1_3(const V1_3::Model& model);
-
-V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type);
-V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type);
-V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type);
-
-V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
-V1_0::Operand convertToV1_0(const V1_3::Operand& operand);
-V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
-V1_2::Operand convertToV1_2(const V1_3::Operand& operand);
-V1_3::Operand convertToV1_3(const V1_0::Operand& operand);
-V1_3::Operand convertToV1_3(const V1_2::Operand& operand);
-V1_3::Operand convertToV1_3(const V1_3::Operand& operand);
-
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands);
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands);
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands);
-hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands);
-hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands);
-hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands);
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands);
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands);
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands);
-
-bool compliantWithV1_0(const V1_0::Request& request);
-bool compliantWithV1_0(const V1_3::Request& request);
-bool compliantWithV1_2(const V1_3::Request& request);
-
-V1_0::Request convertToV1_0(const V1_0::Request& request);
-V1_0::Request convertToV1_0(const V1_3::Request& request);
-V1_0::Request convertToV1_2(const V1_3::Request& request);
-V1_3::Request convertToV1_3(const V1_0::Request& request);
-V1_3::Request convertToV1_3(const V1_3::Request& request);
-
-bool compliantWithV1_0(V1_0::OperandLifeTime lifetime);
-bool compliantWithV1_0(V1_3::OperandLifeTime lifetime);
-bool compliantWithV1_3(V1_0::OperandLifeTime lifetime);
-bool compliantWithV1_3(V1_3::OperandLifeTime lifetime);
-
-V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime);
-V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime);
-V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime);
-V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime);
-
-constexpr V1_3::Priority convertToHalPriority(int32_t priority) {
- switch (priority) {
- case ANEURALNETWORKS_PRIORITY_LOW:
- return V1_3::Priority::LOW;
- case ANEURALNETWORKS_PRIORITY_MEDIUM:
- return V1_3::Priority::MEDIUM;
- case ANEURALNETWORKS_PRIORITY_HIGH:
- return V1_3::Priority::HIGH;
- }
- LOG(FATAL) << "unrecognized priority: " << priority;
- return {};
-}
-
-constexpr Priority convertToCanonicalPriority(int32_t priority) {
- switch (priority) {
- case ANEURALNETWORKS_PRIORITY_LOW:
- return Priority::LOW;
- case ANEURALNETWORKS_PRIORITY_MEDIUM:
- return Priority::MEDIUM;
- case ANEURALNETWORKS_PRIORITY_HIGH:
- return Priority::HIGH;
- }
- LOG(FATAL) << "unrecognized priority: " << priority;
- return {};
-}
-
-// The function syncWait() has the same semantics as the system function
-// ::sync_wait(), except that the syncWait() return value is semantically
-// richer. The timeout parameter is in msecs.
-enum class FenceState {
- ACTIVE, // fence has not been signaled
- SIGNALED, // fence has been signaled
- ERROR, // fence has been placed in the error state
- UNKNOWN, // either bad argument passed to syncWait(), or internal error
-};
-FenceState syncWait(int fd, int timeout);
-
-#ifdef NN_DEBUGGABLE
-uint32_t getProp(const char* str, uint32_t defaultValue = 0);
-#endif // NN_DEBUGGABLE
-
-// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h.
-Capabilities::OperandPerformance uncheckedConvert(
- const V1_3::Capabilities::OperandPerformance& operandPerformance);
-Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo);
-Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities);
-DataLocation uncheckedConvert(const V1_0::DataLocation& location);
-ErrorStatus uncheckedConvert(V1_0::ErrorStatus status);
-ErrorStatus uncheckedConvert(V1_3::ErrorStatus status);
-Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&);
-Extension uncheckedConvert(const V1_2::Extension& extension);
-hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params);
-MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure);
-Memory uncheckedConvert(const hardware::hidl_memory& memory);
-Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&);
-Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph);
-Model uncheckedConvert(const V1_3::Model& model);
-Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params);
-Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params);
-Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime);
-Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params);
-OperandType uncheckedConvert(V1_3::OperandType operandType);
-Operand uncheckedConvert(const V1_3::Operand& operand);
-OperationType uncheckedConvert(V1_3::OperationType operationType);
-Operation uncheckedConvert(const V1_3::Operation& operation);
-OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration);
-OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape);
-Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument);
-Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool);
-Request uncheckedConvert(const V1_3::Request& request);
-std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions);
-std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories);
-std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs);
-std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands);
-std::vector<OutputShape> uncheckedConvert(
- const hardware::hidl_vec<V1_2::OutputShape>& outputShapes);
-std::vector<Request::MemoryPool> uncheckedConvert(
- const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools);
-Timing uncheckedConvert(const V1_2::Timing& timing);
-
-// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h.
-hardware::hidl_memory convertToV1_0(const Memory& memory);
-hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories);
-hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues);
-hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes);
-hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles);
-V1_0::DataLocation convertToV1_0(const DataLocation& location);
-V1_0::ErrorStatus convertToV1_0(ErrorStatus status);
-V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument);
-V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference);
-V1_2::MeasureTiming convertToV1_2(MeasureTiming measure);
-V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&);
-V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params);
-V1_2::OutputShape convertToV1_2(const OutputShape& outputShape);
-V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params);
-V1_2::Timing convertToV1_2(const Timing& timing);
-V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole);
-V1_3::ErrorStatus convertToV1_3(ErrorStatus status);
-V1_3::Model convertToV1_3(const Model& model);
-V1_3::Operand convertToV1_3(const Operand& operand);
-V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime);
-V1_3::OperandType convertToV1_3(OperandType operandType);
-V1_3::Operation convertToV1_3(const Operation& operation);
-V1_3::OperationType convertToV1_3(OperationType operationType);
-V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration);
-V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint);
-V1_3::Priority convertToV1_3(Priority priority);
-V1_3::Request convertToV1_3(const Request& request);
-V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool);
-V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
-
-} // namespace nn
-} // namespace android
+#include "LegacyHalUtils.h"
+#include "LegacyUtils.h"
#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
diff --git a/nn/common/include/nnapi/IDevice.h b/nn/common/include/nnapi/IDevice.h
index 9a2e51680..75cf0a523 100644
--- a/nn/common/include/nnapi/IDevice.h
+++ b/nn/common/include/nnapi/IDevice.h
@@ -245,8 +245,8 @@ class IDevice {
*/
virtual GeneralResult<SharedPreparedModel> prepareModel(
const Model& model, ExecutionPreference preference, Priority priority,
- OptionalTimePoint deadline, const std::vector<NativeHandle>& modelCache,
- const std::vector<NativeHandle>& dataCache, const CacheToken& token) const = 0;
+ OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
+ const std::vector<SharedHandle>& dataCache, const CacheToken& token) const = 0;
/**
* Creates a prepared model from cache files for execution.
@@ -301,8 +301,8 @@ class IDevice {
* for execution, otherwise GeneralError.
*/
virtual GeneralResult<SharedPreparedModel> prepareModelFromCache(
- OptionalTimePoint deadline, const std::vector<NativeHandle>& modelCache,
- const std::vector<NativeHandle>& dataCache, const CacheToken& token) const = 0;
+ OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
+ const std::vector<SharedHandle>& dataCache, const CacheToken& token) const = 0;
/**
* Allocates a driver-managed buffer with the properties specified by the descriptor as well as
diff --git a/nn/common/include/nnapi/TypeUtils.h b/nn/common/include/nnapi/TypeUtils.h
index 6b2af916f..b32f78b66 100644
--- a/nn/common/include/nnapi/TypeUtils.h
+++ b/nn/common/include/nnapi/TypeUtils.h
@@ -88,7 +88,7 @@ std::ostream& operator<<(std::ostream& os,
std::ostream& operator<<(std::ostream& os, const Operand::ExtraParams& extraParams);
std::ostream& operator<<(std::ostream& os, const Operand& operand);
std::ostream& operator<<(std::ostream& os, const Operation& operation);
-std::ostream& operator<<(std::ostream& os, const NativeHandle& handle);
+std::ostream& operator<<(std::ostream& os, const SharedHandle& handle);
std::ostream& operator<<(std::ostream& os, const Memory& memory);
std::ostream& operator<<(std::ostream& os, const Model::Subgraph& subgraph);
std::ostream& operator<<(std::ostream& os, const Model::OperandValues& operandValues);
diff --git a/nn/common/include/nnapi/Types.h b/nn/common/include/nnapi/Types.h
index 3b9725d27..5adab6bfa 100644
--- a/nn/common/include/nnapi/Types.h
+++ b/nn/common/include/nnapi/Types.h
@@ -18,8 +18,7 @@
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H
#include <android-base/expected.h>
-#include <utils/NativeHandle.h>
-#include <utils/StrongPointer.h>
+#include <android-base/unique_fd.h>
#include <array>
#include <chrono>
@@ -238,10 +237,15 @@ struct Operand {
ExtraParams extraParams;
};
-using NativeHandle = ::android::sp<::android::NativeHandle>;
+struct Handle {
+ std::vector<base::unique_fd> fds;
+ std::vector<int> ints;
+};
+
+using SharedHandle = std::shared_ptr<const Handle>;
struct Memory {
- NativeHandle handle;
+ SharedHandle handle;
size_t size = 0;
std::string name;
};
@@ -313,7 +317,8 @@ struct Request {
class SyncFence {
public:
static SyncFence createAsSignaled();
- static Result<SyncFence> create(NativeHandle syncFence);
+ static SyncFence create(base::unique_fd fd);
+ static Result<SyncFence> create(SharedHandle syncFence);
// The function syncWait() has the same semantics as the system function
// ::sync_wait(), except that the syncWait() return value is semantically
@@ -329,12 +334,14 @@ class SyncFence {
FenceState syncWait(OptionalTimeout optionalTimeout) const;
- NativeHandle getHandle() const;
+ SharedHandle getSharedHandle() const;
+ bool hasFd() const;
+ int getFd() const;
private:
- explicit SyncFence(NativeHandle syncFence);
+ explicit SyncFence(SharedHandle syncFence);
- NativeHandle mSyncFence;
+ SharedHandle mSyncFence;
};
using Clock = std::chrono::steady_clock;
diff --git a/nn/common/include/nnapi/Validation.h b/nn/common/include/nnapi/Validation.h
index 3eda174f6..ea213bdd8 100644
--- a/nn/common/include/nnapi/Validation.h
+++ b/nn/common/include/nnapi/Validation.h
@@ -42,7 +42,7 @@ Result<Version> validate(const OutputShape& outputShape);
Result<Version> validate(const Timing& timing);
Result<Version> validate(const Capabilities& capabilities);
Result<Version> validate(const Extension& extension);
-Result<Version> validate(const NativeHandle& handle);
+Result<Version> validate(const SharedHandle& handle);
Result<Version> validate(const Memory& memory);
Result<Version> validate(const Model& model);
Result<Version> validate(const BufferDesc& bufferDesc);
@@ -53,7 +53,7 @@ Result<Version> validate(const OptionalTimeoutDuration& optionalTimeoutDuration)
Result<Version> validate(const std::vector<OutputShape>& outputShapes);
Result<Version> validate(const std::vector<Extension>& extensions);
-Result<Version> validate(const std::vector<NativeHandle>& handles);
+Result<Version> validate(const std::vector<SharedHandle>& handles);
Result<Version> validate(const std::vector<BufferRole>& bufferRoles);
// Validate request applied to model.
diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp
index 90d58e490..d977878cd 100644
--- a/nn/runtime/Manager.cpp
+++ b/nn/runtime/Manager.cpp
@@ -432,8 +432,6 @@ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> DriverPreparedM
CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd > 0; }));
// Make a copy of the memory tracker as we will append memory pools for pointer arguments.
std::vector<const RuntimeMemory*> localMemories = memories;
- sp<V1_3::IFencedExecutionCallback> executeFencedCallback;
- Timing timing;
// We separate the input & output pools so accelerators only need to copy
// the contents of the input pools. We could also use it to set protection
@@ -443,12 +441,12 @@ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> DriverPreparedM
const auto [n1, inputPtrArgsMemory, inputPtrArgsLocations] =
allocatePointerArgumentsToPool(inputs, &localMemories);
if (n1 != ANEURALNETWORKS_NO_ERROR) {
- return {n1, -1, nullptr, timing};
+ return {n1, -1, nullptr, {}};
}
const auto [n2, outputPtrArgsMemory, outputPtrArgsLocations] =
allocatePointerArgumentsToPool(outputs, &localMemories);
if (n2 != ANEURALNETWORKS_NO_ERROR) {
- return {n2, -1, nullptr, timing};
+ return {n2, -1, nullptr, {}};
}
// Copy the input data that was specified via a pointer.
@@ -475,28 +473,18 @@ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> DriverPreparedM
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
"DriverPreparedModel::executeFenced");
- int n = ANEURALNETWORKS_OP_FAILED;
- hardware::hidl_vec<hardware::hidl_handle> waitForHandles;
- waitForHandles.resize(waitFor.size());
- for (uint32_t i = 0; i < waitFor.size(); i++) {
- native_handle_t* nativeHandle = native_handle_create(1, 0);
- if (nativeHandle == nullptr) {
- LOG(ERROR) << "Failed to create native_handle";
- return {n, -1, nullptr, timing};
- }
- int dupFd = dup(waitFor[i]);
+ std::vector<SyncFence> waitForHandles;
+ waitForHandles.reserve(waitFor.size());
+ for (int fd : waitFor) {
+ int dupFd = dup(fd);
if (dupFd <= 0) {
LOG(ERROR) << "Unable to dup the file descriptor";
- return {n, -1, nullptr, timing};
+ return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}};
}
- nativeHandle->data[0] = dupFd;
- hardware::hidl_handle hidlHandle;
- hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
- waitForHandles[i] = std::move(hidlHandle);
+ waitForHandles.push_back(SyncFence::create(base::unique_fd(dupFd)));
}
- hardware::hidl_handle syncFence;
- std::tie(n, syncFence, executeFencedCallback, timing) =
+ auto [n, syncFence, executeFencedCallback, timing] =
mPreparedModel->executeFenced(request, waitForHandles, measure, deadline,
loopTimeoutDuration, timeoutDurationAfterFence);
@@ -506,8 +494,8 @@ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> DriverPreparedM
}
int syncFenceFd = -1;
- if (syncFence.getNativeHandle()) {
- syncFenceFd = dup(syncFence.getNativeHandle()->data[0]);
+ if (syncFence.hasFd()) {
+ syncFenceFd = dup(syncFence.getFd());
if (syncFenceFd < 0) {
LOG(ERROR) << "Failed to dup the file descriptor";
return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, timing};
diff --git a/nn/runtime/VersionedInterfaces.cpp b/nn/runtime/VersionedInterfaces.cpp
index fce558c38..8246b623e 100644
--- a/nn/runtime/VersionedInterfaces.cpp
+++ b/nn/runtime/VersionedInterfaces.cpp
@@ -24,6 +24,7 @@
#include <android-base/thread_annotations.h>
#include <cutils/native_handle.h>
#include <fcntl.h>
+#include <nnapi/hal/CommonUtils.h>
#include <algorithm>
#include <chrono>
@@ -416,67 +417,83 @@ static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
return result;
}
-std::tuple<int, hardware::hidl_handle, sp<V1_3::IFencedExecutionCallback>, Timing>
+std::tuple<int, SyncFence, sp<V1_3::IFencedExecutionCallback>, Timing>
VersionedIPreparedModel::executeFenced(const Request& request,
- const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
- MeasureTiming measure,
+ const std::vector<SyncFence>& waitFor, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
const OptionalTimeoutDuration& timeoutDurationAfterFence) {
// version 1.3 HAL
- hardware::hidl_handle syncFence;
+ hardware::hidl_handle hidlSyncFence;
sp<V1_3::IFencedExecutionCallback> dispatchCallback;
Timing timing = {UINT64_MAX, UINT64_MAX};
if (mPreparedModelV1_3 != nullptr) {
ErrorStatus errorStatus;
const auto otp = makeTimePoint(deadline);
+ auto waitForHandles = hal::utils::convertSyncFences(waitFor);
+ if (!waitForHandles.has_value()) {
+ LOG(ERROR) << "executeFenced failure: " << waitForHandles.error().message;
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(),
+ nullptr, timing);
+ }
hardware::Return<void> ret = mPreparedModelV1_3->executeFenced(
- convertToV1_3(request), waitFor, convertToV1_2(measure), convertToV1_3(otp),
- convertToV1_3(loopTimeoutDuration), convertToV1_3(timeoutDurationAfterFence),
- [&syncFence, &errorStatus, &dispatchCallback](
+ convertToV1_3(request), std::move(waitForHandles).value(), convertToV1_2(measure),
+ convertToV1_3(otp), convertToV1_3(loopTimeoutDuration),
+ convertToV1_3(timeoutDurationAfterFence),
+ [&hidlSyncFence, &errorStatus, &dispatchCallback](
V1_3::ErrorStatus error, const hardware::hidl_handle& handle,
const sp<V1_3::IFencedExecutionCallback>& callback) {
- syncFence = handle;
+ hidlSyncFence = handle;
errorStatus = uncheckedConvert(error);
dispatchCallback = callback;
});
if (!ret.isOk()) {
LOG(ERROR) << "executeFenced failure: " << ret.description();
- return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr),
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(),
nullptr, timing);
}
if (errorStatus != ErrorStatus::NONE) {
LOG(ERROR) << "executeFenced returned " << errorStatus;
return std::make_tuple(convertErrorStatusToResultCode(errorStatus),
- hardware::hidl_handle(nullptr), nullptr, timing);
+ SyncFence::createAsSignaled(), nullptr, timing);
}
- return std::make_tuple(ANEURALNETWORKS_NO_ERROR, syncFence, dispatchCallback, timing);
+ auto sharedHandle = hal::utils::sharedHandleFromNativeHandle(hidlSyncFence);
+ if (!sharedHandle.has_value()) {
+ LOG(ERROR) << "executeFenced failure: " << sharedHandle.error().message;
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(),
+ nullptr, timing);
+ }
+ auto syncFence = sharedHandle.value() == nullptr
+ ? SyncFence::createAsSignaled()
+ : SyncFence::create(std::move(sharedHandle).value());
+ if (!syncFence.has_value()) {
+ LOG(ERROR) << "executeFenced failure: " << syncFence.error();
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(),
+ nullptr, timing);
+ }
+ return std::make_tuple(ANEURALNETWORKS_NO_ERROR, std::move(syncFence).value(),
+ dispatchCallback, timing);
}
// fallback to synchronous execution if sync_fence is not supported
// first wait for all sync fences to be ready.
LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution";
- for (const auto& fenceHandle : waitFor) {
- if (!fenceHandle.getNativeHandle()) {
- return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr),
- nullptr, timing);
- }
- int syncFd = fenceHandle.getNativeHandle()->data[0];
- if (syncFd <= 0) {
- return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr),
- nullptr, timing);
+ for (const auto& fence : waitFor) {
+ if (!fence.hasFd() || fence.getFd() <= 0) {
+ return std::make_tuple(ANEURALNETWORKS_BAD_DATA, SyncFence::createAsSignaled(), nullptr,
+ timing);
}
- auto r = syncWait(syncFd, -1);
- if (r != FenceState::SIGNALED) {
- LOG(ERROR) << "syncWait failed, fd: " << syncFd;
- return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr),
+ auto r = fence.syncWait({/* no timeout */});
+ if (r != SyncFence::FenceState::SIGNALED) {
+ LOG(ERROR) << "syncWait failed, fd: " << fence.getFd() << ", state: " << r;
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(),
nullptr, timing);
}
}
int errorCode;
std::tie(errorCode, std::ignore, timing) =
executeSynchronously(request, measure, deadline, loopTimeoutDuration);
- return std::make_tuple(errorCode, hardware::hidl_handle(nullptr), nullptr, timing);
+ return std::make_tuple(errorCode, SyncFence::createAsSignaled(), nullptr, timing);
}
static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
diff --git a/nn/runtime/VersionedInterfaces.h b/nn/runtime/VersionedInterfaces.h
index d41dcd3ad..bd7e396bf 100644
--- a/nn/runtime/VersionedInterfaces.h
+++ b/nn/runtime/VersionedInterfaces.h
@@ -754,8 +754,8 @@ class VersionedIPreparedModel {
* all sync fences in waitFor are signaled.
* @return A tuple consisting of:
* - Error code of the dispatch call.
- * - A sync_fence that will be triggered when the task is completed.
- * The sync_fence will be set to error if critical error occurs when doing
+ * - A SyncFence that will be triggered when the task is completed.
+ * The SyncFence will be set to error if critical error occurs when doing
* actual evaluation.
* - A callback can be used to query information like duration
* and detailed runtime error status when the task is completed.
@@ -763,11 +763,11 @@ class VersionedIPreparedModel {
* sync execution. Either IFencedExecutionCallback will be
* returned or optional timing information is returned
*/
- std::tuple<int, hardware::hidl_handle, sp<V1_3::IFencedExecutionCallback>, Timing>
- executeFenced(const Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
- MeasureTiming measure, const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& timeoutDurationAfterFence);
+ std::tuple<int, SyncFence, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
+ const Request& request, const std::vector<SyncFence>& waitFor, MeasureTiming measure,
+ const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence);
private:
friend class VersionedIDevice;
diff --git a/nn/runtime/test/HalUtils.h b/nn/runtime/test/HalUtils.h
new file mode 100644
index 000000000..a1cb5b13d
--- /dev/null
+++ b/nn/runtime/test/HalUtils.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H
+
+#include "HalInterfaces.h"
+#include "Utils.h"
+
+namespace android::nn {
+
+// Creates valid V1_3::Capabilities.
+inline V1_3::Capabilities makeCapabilities(float perf) {
+ const V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = perfInfo,
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(perfInfo),
+ .ifPerformance = perfInfo,
+ .whilePerformance = perfInfo};
+};
+
+} // namespace android::nn
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H
diff --git a/nn/runtime/test/TestExtensions.cpp b/nn/runtime/test/TestExtensions.cpp
index da13073e2..d9fa96d7c 100644
--- a/nn/runtime/test/TestExtensions.cpp
+++ b/nn/runtime/test/TestExtensions.cpp
@@ -20,6 +20,7 @@
#include <vector>
#include "HalInterfaces.h"
+#include "HalUtils.h"
#include "Manager.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksExtensions.h"
@@ -56,7 +57,7 @@ class TestDriver : public SampleDriver {
}
hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
- cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
+ cb(V1_3::ErrorStatus::NONE, ::android::nn::makeCapabilities(1.0));
return hardware::Void();
}
diff --git a/nn/runtime/test/TestFailingDriver.cpp b/nn/runtime/test/TestFailingDriver.cpp
index d2e30a656..a7a0aa5d9 100644
--- a/nn/runtime/test/TestFailingDriver.cpp
+++ b/nn/runtime/test/TestFailingDriver.cpp
@@ -22,6 +22,7 @@
#include "CompilationBuilder.h"
#include "ExecutionPlan.h"
+#include "HalUtils.h"
#include "Manager.h"
#include "SampleDriverPartial.h"
#include "TestNeuralNetworksWrapper.h"
@@ -51,10 +52,7 @@ class FailingTestDriver : public SampleDriverPartial {
FailingTestDriver() : SampleDriverPartial(kTestDriverName, &mEmptyOperationResolver) {}
hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
- cb(V1_3::ErrorStatus::NONE,
- {.operandPerformance = {{.type = V1_3::OperandType::TENSOR_FLOAT32,
- .info = {.execTime = 0.1, // Faster than CPU.
- .powerUsage = 0.1}}}});
+ cb(V1_3::ErrorStatus::NONE, makeCapabilities(0.1)); // Faster than CPU.
return hardware::Void();
}
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index 939612a78..8e705afb9 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -33,6 +33,7 @@
#include "ControlFlow.h"
#include "ExecutionPlan.h"
#include "HalInterfaces.h"
+#include "HalUtils.h"
#include "Manager.h"
#include "ModelBuilder.h"
#include "NeuralNetworks.h"
@@ -175,16 +176,6 @@ using WrapperSymmPerChannelQuantParams = ::android::nn::test_wrapper::SymmPerCha
using WrapperType = ::android::nn::test_wrapper::Type;
using android::sp;
-V1_3::Capabilities makeCapabilities(float perf) {
- V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
- return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
- .relaxedFloat32toFloat16PerformanceTensor = perfInfo,
- .operandPerformance =
- ::android::nn::nonExtensionOperandPerformance<HalVersion::V1_3>(perfInfo),
- .ifPerformance = perfInfo,
- .whilePerformance = perfInfo};
-};
-
void update(V1_3::Capabilities* capabilities, V1_3::OperandType type, float perf) {
V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
::android::nn::update(&capabilities->operandPerformance, type, perfInfo);
@@ -2056,7 +2047,7 @@ TEST_F(PartitioningTest, Perf) {
model.finish();
ASSERT_TRUE(model.isValid());
- const V1_3::Capabilities baseCapabilities = makeCapabilities(0.5);
+ const V1_3::Capabilities baseCapabilities = ::android::nn::makeCapabilities(0.5);
{
// better than base
@@ -2846,7 +2837,7 @@ TEST_F(PerfTest, Lookup) {
// We'll use this to ensure that we can save and then recover a type's performance.
auto typePerf = [](V1_3::OperandType type) { return float(static_cast<uint32_t>(type)); };
- V1_3::Capabilities capabilities = makeCapabilities(-1.0f);
+ V1_3::Capabilities capabilities = ::android::nn::makeCapabilities(-1.0f);
for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
diff --git a/nn/runtime/test/TestRemoveDefaultArguments.cpp b/nn/runtime/test/TestRemoveDefaultArguments.cpp
index daef6bf60..6b7283f44 100644
--- a/nn/runtime/test/TestRemoveDefaultArguments.cpp
+++ b/nn/runtime/test/TestRemoveDefaultArguments.cpp
@@ -23,6 +23,7 @@
#include <vector>
#include "GeneratedTestUtils.h"
+#include "HalUtils.h"
#include "Manager.h"
#include "SampleDriverPartial.h"
#include "TestNeuralNetworksWrapper.h"
@@ -113,7 +114,7 @@ class TestDriver : public SampleDriverPartial {
TestDriver() : SampleDriverPartial(kTestDriverName) {}
hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
- cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
+ cb(V1_3::ErrorStatus::NONE, makeCapabilities(1.0));
return hardware::Void();
}
diff --git a/nn/runtime/test/TestVersionedInterfaces.cpp b/nn/runtime/test/TestVersionedInterfaces.cpp
index b4f32bcde..4187029f9 100644
--- a/nn/runtime/test/TestVersionedInterfaces.cpp
+++ b/nn/runtime/test/TestVersionedInterfaces.cpp
@@ -2182,7 +2182,7 @@ TEST_F(VersionedIPreparedModelV1_0Test, executeFenced) {
// verify success
EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2198,7 +2198,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFenced) {
// verify success
EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2214,7 +2214,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) {
// verify success
EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2235,7 +2235,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executeFenced) {
// verify success
EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
- EXPECT_NE(nullptr, syncFence.getNativeHandle());
+ EXPECT_NE(nullptr, syncFence.getSharedHandle());
EXPECT_NE(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2494,7 +2494,7 @@ TEST_F(VersionedIPreparedModelV1_0Test, executeFencedFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2511,7 +2511,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFencedFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2528,7 +2528,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2550,7 +2550,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executeFencedFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2759,7 +2759,7 @@ TEST_F(VersionedIPreparedModelV1_0Test, executeFencedTransportFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2776,7 +2776,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFencedTransportFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2793,7 +2793,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFencedTransportFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}
@@ -2810,7 +2810,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executeFencedTransportFailure) {
// verify failure
EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode);
- EXPECT_EQ(nullptr, syncFence.getNativeHandle());
+ EXPECT_EQ(nullptr, syncFence.getSharedHandle());
EXPECT_EQ(nullptr, dispatchCallback.get());
EXPECT_EQ(kNoTiming, timing);
}