author      Slava Shklyaev <slavash@google.com>    2020-08-13 13:16:03 +0100
committer   Slava Shklyaev <slavash@google.com>    2020-11-02 10:07:07 +0000
commit      cbcaa00003cf5a4597460dbb5f8cb9f992e939e7 (patch)
tree        fcfc08ec92b731594fc96c586e1207bccce2624d
parent      e4a030ef549d8d2c279436628c961eb3f70df7d6 (diff)
download    ml-cbcaa00003cf5a4597460dbb5f8cb9f992e939e7.tar.gz
Migrate NNAPI runtime to canonical types
This change replaces most uses of HAL types in the codebase with
equivalent canonical types. Later changes will introduce more
refactorings.

Also removes unused files nn/runtime/test/Bridge.{h,cpp}.

Bug: 160669906
Fix: 155923931
Test: NeuralNetworksTest_static (all 7 passes)
Test: NeuralNetworksTest_operations
Test: NeuralNetworksTest_utils
Test: NeuralNetworksTest_logtag
Test: nnCache_test
Test: BlobCache_test
Change-Id: I63fa286e926a096948f1b1b172d1d562c4f52f29
Merged-In: I63fa286e926a096948f1b1b172d1d562c4f52f29
(cherry picked from commit 069993366ce59913ff162ed144749bb8794d990c)
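The recurring pattern in the hunks below is mechanical: drop `using namespace hal;`, replace free HAL enums such as `OperandLifeTime` with nested canonical ones such as `Operand::LifeTime`, replace `hidl_vec<T>` with `std::vector<T>`, and log via the canonical types' `operator<<` instead of `toString()`/`getOperationName()`. A minimal, self-contained sketch of that style, using toy stand-ins rather than the real NNAPI headers:

```cpp
#include <iostream>
#include <vector>

// Toy stand-in for the canonical Operand type; illustrative only.
struct Operand {
    // The lifetime enum is nested in Operand (Operand::LifeTime) instead of
    // being a free HAL enum (OperandLifeTime).
    enum class LifeTime { TEMPORARY_VARIABLE, CONSTANT_COPY, NO_VALUE };
    LifeTime lifetime = LifeTime::NO_VALUE;
};

// Canonical types ship streaming operators, so logging sites write
// `<< operand.lifetime` directly instead of `<< toString(operand.lifetime)`.
std::ostream& operator<<(std::ostream& os, Operand::LifeTime lifetime) {
    switch (lifetime) {
        case Operand::LifeTime::TEMPORARY_VARIABLE: return os << "TEMPORARY_VARIABLE";
        case Operand::LifeTime::CONSTANT_COPY: return os << "CONSTANT_COPY";
        case Operand::LifeTime::NO_VALUE: return os << "NO_VALUE";
    }
    return os;
}

int main() {
    std::vector<Operand> operands(2);  // hidl_vec<Operand> -> std::vector<Operand>
    operands[0].lifetime = Operand::LifeTime::CONSTANT_COPY;
    for (const Operand& operand : operands) std::cout << operand.lifetime << '\n';
}
```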
-rw-r--r-- nn/common/Android.bp | 18
-rw-r--r-- nn/common/BufferTracker.cpp | 16
-rw-r--r-- nn/common/CpuExecutor.cpp | 178
-rw-r--r-- nn/common/ExecutionBurstController.cpp | 68
-rw-r--r-- nn/common/ExecutionBurstServer.cpp | 61
-rw-r--r-- nn/common/GraphDump.cpp | 55
-rw-r--r-- nn/common/MetaModel.cpp | 190
-rw-r--r-- nn/common/OperationResolver.cpp | 2
-rw-r--r-- nn/common/OperationsUtils.cpp | 14
-rw-r--r-- nn/common/Utils.cpp | 854
-rw-r--r-- nn/common/UtilsTest.cpp | 13
-rw-r--r-- nn/common/ValidateHal.cpp | 331
-rw-r--r-- nn/common/include/BufferTracker.h | 14
-rw-r--r-- nn/common/include/CpuExecutor.h | 67
-rw-r--r-- nn/common/include/GraphDump.h | 7
-rw-r--r-- nn/common/include/HalInterfaces.h | 76
-rw-r--r-- nn/common/include/MetaModel.h | 36
-rw-r--r-- nn/common/include/OperationResolver.h | 20
-rw-r--r-- nn/common/include/OperationsUtils.h | 21
-rw-r--r-- nn/common/include/Utils.h | 452
-rw-r--r-- nn/common/include/ValidateHal.h | 36
-rw-r--r-- nn/common/operations/Activation.cpp | 5
-rw-r--r-- nn/common/operations/ArgMinMax.cpp | 3
-rw-r--r-- nn/common/operations/BidirectionalSequenceLSTM.cpp | 3
-rw-r--r-- nn/common/operations/BidirectionalSequenceLSTM.h | 9
-rw-r--r-- nn/common/operations/BidirectionalSequenceRNN.cpp | 5
-rw-r--r-- nn/common/operations/Broadcast.cpp | 14
-rw-r--r-- nn/common/operations/Cast.cpp | 3
-rw-r--r-- nn/common/operations/ChannelShuffle.cpp | 3
-rw-r--r-- nn/common/operations/Comparisons.cpp | 5
-rw-r--r-- nn/common/operations/Concatenation.cpp | 3
-rw-r--r-- nn/common/operations/Conv2D.cpp | 15
-rw-r--r-- nn/common/operations/DepthwiseConv2D.cpp | 14
-rw-r--r-- nn/common/operations/Dequantize.cpp | 15
-rw-r--r-- nn/common/operations/Elementwise.cpp | 3
-rw-r--r-- nn/common/operations/Elu.cpp | 3
-rw-r--r-- nn/common/operations/EmbeddingLookup.cpp | 3
-rw-r--r-- nn/common/operations/EmbeddingLookup.h | 4
-rw-r--r-- nn/common/operations/Fill.cpp | 7
-rw-r--r-- nn/common/operations/FullyConnected.cpp | 3
-rw-r--r-- nn/common/operations/Gather.cpp | 3
-rw-r--r-- nn/common/operations/GenerateProposals.cpp | 3
-rw-r--r-- nn/common/operations/HashtableLookup.cpp | 3
-rw-r--r-- nn/common/operations/HashtableLookup.h | 4
-rw-r--r-- nn/common/operations/HeatmapMaxKeypoint.cpp | 3
-rw-r--r-- nn/common/operations/InstanceNormalization.cpp | 3
-rw-r--r-- nn/common/operations/L2Normalization.cpp | 3
-rw-r--r-- nn/common/operations/LSHProjection.cpp | 13
-rw-r--r-- nn/common/operations/LSHProjection.h | 6
-rw-r--r-- nn/common/operations/LSTM.cpp | 10
-rw-r--r-- nn/common/operations/LSTM.h | 6
-rw-r--r-- nn/common/operations/LocalResponseNormalization.cpp | 3
-rw-r--r-- nn/common/operations/LogSoftmax.cpp | 9
-rw-r--r-- nn/common/operations/LogicalAndOr.cpp | 6
-rw-r--r-- nn/common/operations/LogicalNot.cpp | 3
-rw-r--r-- nn/common/operations/MaximumMinimum.cpp | 5
-rw-r--r-- nn/common/operations/Multinomial.cpp | 3
-rw-r--r-- nn/common/operations/Multinomial.h | 6
-rw-r--r-- nn/common/operations/MultinomialTest.cpp | 12
-rw-r--r-- nn/common/operations/Neg.cpp | 3
-rw-r--r-- nn/common/operations/PRelu.cpp | 3
-rw-r--r-- nn/common/operations/Pooling.cpp | 6
-rw-r--r-- nn/common/operations/Pow.cpp | 6
-rw-r--r-- nn/common/operations/QLSTM.cpp | 2
-rw-r--r-- nn/common/operations/Quantize.cpp | 10
-rw-r--r-- nn/common/operations/QuantizedLSTM.cpp | 3
-rw-r--r-- nn/common/operations/QuantizedLSTM.h | 4
-rw-r--r-- nn/common/operations/RNN.cpp | 3
-rw-r--r-- nn/common/operations/RNN.h | 6
-rw-r--r-- nn/common/operations/Rank.cpp | 27
-rw-r--r-- nn/common/operations/Reduce.cpp | 3
-rw-r--r-- nn/common/operations/ResizeImageOps.cpp | 12
-rw-r--r-- nn/common/operations/RoiAlign.cpp | 3
-rw-r--r-- nn/common/operations/RoiPooling.cpp | 3
-rw-r--r-- nn/common/operations/SVDF.cpp | 3
-rw-r--r-- nn/common/operations/SVDF.h | 8
-rw-r--r-- nn/common/operations/Select.cpp | 5
-rw-r--r-- nn/common/operations/Slice.cpp | 3
-rw-r--r-- nn/common/operations/Softmax.cpp | 3
-rw-r--r-- nn/common/operations/Squeeze.cpp | 5
-rw-r--r-- nn/common/operations/StridedSlice.cpp | 5
-rw-r--r-- nn/common/operations/Tile.cpp | 3
-rw-r--r-- nn/common/operations/TopK_V2.cpp | 7
-rw-r--r-- nn/common/operations/Transpose.cpp | 3
-rw-r--r-- nn/common/operations/TransposeConv2D.cpp | 15
-rw-r--r-- nn/common/operations/UnidirectionalSequenceLSTM.cpp | 5
-rw-r--r-- nn/common/operations/UnidirectionalSequenceRNN.cpp | 6
-rw-r--r-- nn/driver/sample/SampleDriver.cpp | 529
-rw-r--r-- nn/driver/sample/SampleDriver.h | 187
-rw-r--r-- nn/driver/sample/SampleDriverFloatFast.cpp | 22
-rw-r--r-- nn/driver/sample/SampleDriverFloatSlow.cpp | 22
-rw-r--r-- nn/driver/sample/SampleDriverFloatXNNPACK.cpp | 1009
-rw-r--r-- nn/driver/sample/SampleDriverFull.cpp | 22
-rw-r--r-- nn/driver/sample/SampleDriverFull.h | 10
-rw-r--r-- nn/driver/sample/SampleDriverMinimal.cpp | 28
-rw-r--r-- nn/driver/sample/SampleDriverPartial.cpp | 20
-rw-r--r-- nn/driver/sample/SampleDriverPartial.h | 18
-rw-r--r-- nn/driver/sample/SampleDriverQuant.cpp | 26
-rw-r--r-- nn/driver/sample/SampleDriverUtils.cpp | 20
-rw-r--r-- nn/driver/sample/SampleDriverUtils.h | 48
-rw-r--r-- nn/runtime/Callbacks.cpp | 71
-rw-r--r-- nn/runtime/Callbacks.h | 75
-rw-r--r-- nn/runtime/CompilationBuilder.cpp | 2
-rw-r--r-- nn/runtime/Event.h | 22
-rw-r--r-- nn/runtime/ExecutionBuilder.cpp | 118
-rw-r--r-- nn/runtime/ExecutionBuilder.h | 68
-rw-r--r-- nn/runtime/ExecutionPlan.cpp | 210
-rw-r--r-- nn/runtime/ExecutionPlan.h | 66
-rw-r--r-- nn/runtime/Manager.cpp | 232
-rw-r--r-- nn/runtime/Manager.h | 67
-rw-r--r-- nn/runtime/Memory.cpp | 91
-rw-r--r-- nn/runtime/Memory.h | 74
-rw-r--r-- nn/runtime/ModelArgumentInfo.cpp | 14
-rw-r--r-- nn/runtime/ModelArgumentInfo.h | 19
-rw-r--r-- nn/runtime/ModelBuilder.cpp | 171
-rw-r--r-- nn/runtime/ModelBuilder.h | 29
-rw-r--r-- nn/runtime/NeuralNetworks.cpp | 43
-rw-r--r-- nn/runtime/TypeManager.cpp | 20
-rw-r--r-- nn/runtime/TypeManager.h | 23
-rw-r--r-- nn/runtime/VersionedInterfaces.cpp | 605
-rw-r--r-- nn/runtime/VersionedInterfaces.h | 106
-rw-r--r-- nn/runtime/test/Android.bp | 2
-rw-r--r-- nn/runtime/test/Bridge.cpp | 36
-rw-r--r-- nn/runtime/test/Bridge.h | 42
-rw-r--r-- nn/runtime/test/TestCompilationCaching.cpp | 169
-rw-r--r-- nn/runtime/test/TestCompliance.cpp | 19
-rw-r--r-- nn/runtime/test/TestExecution.cpp | 374
-rw-r--r-- nn/runtime/test/TestExtensions.cpp | 17
-rw-r--r-- nn/runtime/test/TestFailingDriver.cpp | 17
-rw-r--r-- nn/runtime/test/TestIntrospectionControl.cpp | 205
-rw-r--r-- nn/runtime/test/TestMemoryDomain.cpp | 120
-rw-r--r-- nn/runtime/test/TestPartitioning.cpp | 350
-rw-r--r-- nn/runtime/test/TestPartitioningRandom.cpp | 126
-rw-r--r-- nn/runtime/test/TestRemoveDefaultArguments.cpp | 9
-rw-r--r-- nn/runtime/test/TestUnspecifiedDimensions.cpp | 3
-rw-r--r-- nn/runtime/test/TestVersionedInterfaces.cpp | 541
-rw-r--r-- nn/runtime/test/android_fuzzing/Converter.cpp | 31
-rw-r--r-- nn/runtime/test/android_fuzzing/FuzzHarness.cpp | 2
-rw-r--r-- nn/runtime/test/android_fuzzing/GenerateCorpus.cpp | 4
-rw-r--r-- nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp | 46
-rw-r--r-- nn/runtime/test/fibonacci_extension/FibonacciDriver.h | 10
-rw-r--r-- nn/runtime/test/fuzzing/RandomGraphGenerator.cpp | 10
-rw-r--r-- nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h | 71
-rw-r--r-- nn/runtime/test/fuzzing/RandomVariable.cpp | 2450
-rw-r--r-- nn/runtime/test/fuzzing/TestRandomGraph.cpp | 31
-rw-r--r-- nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h | 2
-rw-r--r-- nn/tools/test_generator/test_harness/TestHarness.cpp | 12
-rw-r--r-- nn/tools/test_generator/test_harness/include/TestHarness.h | 4
148 files changed, 6029 insertions, 5679 deletions
diff --git a/nn/common/Android.bp b/nn/common/Android.bp
index 202c69ed1..10f97a219 100644
--- a/nn/common/Android.bp
+++ b/nn/common/Android.bp
@@ -109,6 +109,12 @@ cc_library_static {
],
whole_static_libs: [
"libarect",
+ "neuralnetworks_types",
+ "neuralnetworks_utils_hal_1_0", // TODO(b/160669116): Remove VNDK dependencies.
+ "neuralnetworks_utils_hal_1_1",
+ "neuralnetworks_utils_hal_1_2",
+ "neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
],
cflags: [
"-DTF_LITE_DISABLE_X86_NEON",
@@ -204,6 +210,12 @@ cc_library_static {
whole_static_libs: [
"libarect",
"libtflite_kernel_utils",
+ "neuralnetworks_types",
+ "neuralnetworks_utils_hal_1_0", // TODO(b/160669116): Remove VNDK dependencies.
+ "neuralnetworks_utils_hal_1_1",
+ "neuralnetworks_utils_hal_1_2",
+ "neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
"philox_random",
],
static_libs: [
@@ -232,6 +244,12 @@ cc_defaults {
name: "neuralnetworks_utils_defaults",
host_supported: true,
vendor_available: true,
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.neuralnetworks",
+ "test_com.android.neuralnetworks",
+ ],
+ min_sdk_version: "30",
cflags: [
"-Wall",
"-Werror",
diff --git a/nn/common/BufferTracker.cpp b/nn/common/BufferTracker.cpp
index e6b8d947d..cb2a326f2 100644
--- a/nn/common/BufferTracker.cpp
+++ b/nn/common/BufferTracker.cpp
@@ -28,11 +28,10 @@
#include "CpuExecutor.h"
#include "HalInterfaces.h"
#include "Utils.h"
+#include "nnapi/TypeUtils.h"
namespace android::nn {
-using namespace hal;
-
std::shared_ptr<ManagedBuffer> ManagedBuffer::create(uint32_t size,
std::set<PreparedModelRole> roles,
const Operand& operand) {
@@ -40,7 +39,7 @@ std::shared_ptr<ManagedBuffer> ManagedBuffer::create(uint32_t size,
if (buffer == nullptr) {
return nullptr;
}
- if (isExtensionOperandType(operand.type)) {
+ if (isExtension(operand.type)) {
LOG(ERROR) << "ManagedBuffer cannot handle extension operands.";
return nullptr;
}
@@ -55,19 +54,18 @@ ManagedBuffer::ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
kOperandType(operand.type),
kInitialDimensions(operand.dimensions),
mUpdatedDimensions(operand.dimensions) {
- CHECK(!isExtensionOperandType(kOperandType));
+ CHECK(!isExtension(kOperandType));
}
ErrorStatus ManagedBuffer::validateRequest(uint32_t poolIndex, const Request& request,
- const IPreparedModel* preparedModel) const {
+ const V1_3::IPreparedModel* preparedModel) const {
CHECK_LT(poolIndex, request.pools.size());
- CHECK(request.pools[poolIndex].getDiscriminator() ==
- Request::MemoryPool::hidl_discriminator::token);
+ CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex]));
std::lock_guard<std::mutex> guard(mMutex);
bool usedAsInput = false, usedAsOutput = false;
for (uint32_t i = 0; i < request.inputs.size(); i++) {
- if (request.inputs[i].hasNoValue) continue;
+ if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
if (request.inputs[i].location.poolIndex != poolIndex) continue;
// Validate if the input role is specified during allocation.
if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
@@ -89,7 +87,7 @@ ErrorStatus ManagedBuffer::validateRequest(uint32_t poolIndex, const Request& re
usedAsInput = true;
}
for (uint32_t i = 0; i < request.outputs.size(); i++) {
- if (request.outputs[i].hasNoValue) continue;
+ if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
if (request.outputs[i].location.poolIndex != poolIndex) continue;
if (usedAsInput || usedAsOutput) {
LOG(ERROR) << "ManagedBuffer::validateRequest -- using the same device memory for "
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 5dd41ad8e..4ca9709d6 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -40,13 +40,14 @@
#include "Operations.h"
#include "OperationsUtils.h"
#include "Tracing.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-namespace {
+using ::android::hidl::memory::V1_0::IMemory;
-using namespace hal;
+namespace {
class OperationExecutionContext : public IOperationExecutionContext {
DISALLOW_IMPLICIT_CONSTRUCTORS(OperationExecutionContext);
@@ -59,7 +60,7 @@ class OperationExecutionContext : public IOperationExecutionContext {
OperandType getInputType(uint32_t index) const override;
Shape getInputShape(uint32_t index) const override;
const void* getInputBuffer(uint32_t index) const override;
- const OperandExtraParams getInputExtraParams(uint32_t index) const override;
+ const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;
uint32_t getNumOutputs() const override;
OperandType getOutputType(uint32_t index) const override;
@@ -117,7 +118,7 @@ const void* OperationExecutionContext::getInputBuffer(uint32_t index) const {
return getInputInfo(index)->buffer;
}
-const OperandExtraParams OperationExecutionContext::getInputExtraParams(uint32_t index) const {
+const Operand::ExtraParams& OperationExecutionContext::getInputExtraParams(uint32_t index) const {
return getInputInfo(index)->extraParams;
}
@@ -154,7 +155,7 @@ int OperationExecutionContext::getResultCode() const {
bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape, int* result) {
// For user-provided model output operands, the parameters must match the Shape
// calculated from the preparation step.
- if (info->lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (info->lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
if (info->type != shape.type) {
LOG(ERROR) << "Invalid type for model output";
*result = ANEURALNETWORKS_OP_FAILED;
@@ -191,7 +192,7 @@ bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape, in
// TODO(b/153081229): We bypass the overflow check on extension operands because we do not know
// the sizes of extension types.
- if (!isExtensionOperandType(info->type) &&
+ if (!isExtension(info->type) &&
nonExtensionOperandSizeOfDataOverflowsUInt32(info->type, info->dimensions)) {
LOG(ERROR) << "Operand data size overflows uint32_t";
*result = ANEURALNETWORKS_OP_FAILED;
@@ -199,9 +200,9 @@ bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape, in
}
// Allocate the buffer only if the combined dimension is fully specified
- if (info->buffer == nullptr && (info->lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- info->lifetime == OperandLifeTime::SUBGRAPH_OUTPUT)) {
- if (isExtensionOperandType(info->type)) {
+ if (info->buffer == nullptr && (info->lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ info->lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT)) {
+ if (isExtension(info->type)) {
LOG(ERROR) << "Cannot allocate a variable of an extension type";
*result = ANEURALNETWORKS_OP_FAILED;
return false;
@@ -232,21 +233,21 @@ bool OperationExecutionContext::setOutputShape(uint32_t index, const Shape& shap
}
bool OperationExecutionContext::isOmittedInput(uint32_t index) const {
- return getInputInfo(index)->lifetime == OperandLifeTime::NO_VALUE;
+ return getInputInfo(index)->lifetime == Operand::LifeTime::NO_VALUE;
}
bool OperationExecutionContext::isOmittedOutput(uint32_t index) const {
- return getOutputInfo(index)->lifetime == OperandLifeTime::NO_VALUE;
+ return getOutputInfo(index)->lifetime == Operand::LifeTime::NO_VALUE;
}
bool OperationExecutionContext::checkNoOmittedOperand() const {
for (uint32_t i = 0; i < operation->inputs.size(); i++) {
- NN_RET_CHECK(!isOmittedInput(i)) << getOperationName(operation->type) << " input operand "
- << i << " is required but missing.";
+ NN_RET_CHECK(!isOmittedInput(i))
+ << operation->type << " input operand " << i << " is required but missing.";
}
for (uint32_t i = 0; i < operation->outputs.size(); i++) {
- NN_RET_CHECK(!isOmittedOutput(i)) << getOperationName(operation->type) << " output operand "
- << i << " is required but missing.";
+ NN_RET_CHECK(!isOmittedOutput(i))
+ << operation->type << " output operand " << i << " is required but missing.";
}
return true;
}
@@ -256,9 +257,8 @@ bool OperationExecutionContext::checkNoZeroSizedInput() const {
if (isOmittedInput(i)) continue;
for (uint32_t j = 0; j < getInputInfo(i)->dimensions.size(); j++) {
NN_RET_CHECK_NE(getInputInfo(i)->dimensions[j], 0)
- << getOperationName(operation->type)
- << " does not support zero-sized tensor, but input " << i << " dimension " << j
- << " is 0.";
+ << operation->type << " does not support zero-sized tensor, but input " << i
+ << " dimension " << j << " is 0.";
}
}
return true;
@@ -273,8 +273,8 @@ bool OperationExecutionContext::checkNoZeroSizedInput() const {
// when the RunTimePoolInfo is destroyed or is assigned to.
class RunTimePoolInfo::RunTimePoolInfoImpl {
public:
- RunTimePoolInfoImpl(const hidl_memory& hidlMemory, uint8_t* buffer, const sp<IMemory>& memory,
- AHardwareBuffer* hardwareBuffer, uint32_t size);
+ RunTimePoolInfoImpl(const hardware::hidl_memory& hidlMemory, uint8_t* buffer,
+ const sp<IMemory>& memory, AHardwareBuffer* hardwareBuffer, uint32_t size);
// rule of five...
~RunTimePoolInfoImpl();
@@ -288,10 +288,10 @@ class RunTimePoolInfo::RunTimePoolInfoImpl {
bool flush() const;
- const hidl_memory& getHidlMemory() const { return mHidlMemory; }
+ const hardware::hidl_memory& getHidlMemory() const { return mHidlMemory; }
private:
- const hidl_memory mHidlMemory; // always used
+ const hardware::hidl_memory mHidlMemory; // always used
uint8_t* const mBuffer = nullptr; // always used
const sp<IMemory> mMemory; // only used when hidlMemory.name() == "ashmem"
AHardwareBuffer*
@@ -299,7 +299,7 @@ class RunTimePoolInfo::RunTimePoolInfoImpl {
const uint32_t mSize;
};
-RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(const hidl_memory& hidlMemory,
+RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(const hardware::hidl_memory& hidlMemory,
uint8_t* buffer,
const sp<IMemory>& memory,
AHardwareBuffer* hardwareBuffer,
@@ -352,8 +352,8 @@ bool RunTimePoolInfo::RunTimePoolInfoImpl::flush() const {
// TODO: short term, make share memory mapping and updating a utility function.
// TODO: long term, implement mmap_fd as a hidl IMemory service.
-std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromHidlMemory(
- const hidl_memory& hidlMemory) {
+std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromMemory(const Memory& canonicalMemory) {
+ hardware::hidl_memory hidlMemory = convertToV1_0(canonicalMemory);
uint8_t* buffer = nullptr;
sp<IMemory> memory;
AHardwareBuffer* hardwareBuffer = nullptr;
@@ -423,8 +423,8 @@ std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromHidlMemory(
}
RunTimePoolInfo RunTimePoolInfo::createFromExistingBuffer(uint8_t* buffer, uint32_t size) {
- const auto impl = std::make_shared<const RunTimePoolInfoImpl>(hidl_memory{}, buffer, nullptr,
- nullptr, size);
+ const auto impl = std::make_shared<const RunTimePoolInfoImpl>(hardware::hidl_memory{}, buffer,
+ nullptr, nullptr, size);
return {impl};
}
@@ -443,17 +443,17 @@ bool RunTimePoolInfo::flush() const {
return mImpl->flush();
}
-const hidl_memory& RunTimePoolInfo::getHidlMemory() const {
- return mImpl->getHidlMemory();
+Memory RunTimePoolInfo::getMemory() const {
+ return uncheckedConvert(mImpl->getHidlMemory());
}
-bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
- const hidl_vec<hidl_memory>& pools) {
+bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,
+ const std::vector<Memory>& pools) {
CHECK(poolInfos != nullptr);
poolInfos->clear();
poolInfos->reserve(pools.size());
for (const auto& pool : pools) {
- if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromHidlMemory(pool)) {
+ if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromMemory(pool)) {
poolInfos->push_back(*poolInfo);
} else {
LOG(ERROR) << "Could not map pools";
@@ -465,18 +465,18 @@ bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos
}
bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos,
- const hidl_vec<Request::MemoryPool>& pools) {
+ const std::vector<Request::MemoryPool>& pools) {
CHECK(poolInfos != nullptr);
poolInfos->clear();
poolInfos->reserve(pools.size());
for (const auto& pool : pools) {
- if (pool.getDiscriminator() != Request::MemoryPool::hidl_discriminator::hidlMemory) {
+ if (!std::holds_alternative<Memory>(pool)) {
LOG(ERROR) << "Unknown memory token";
poolInfos->clear();
return false;
}
if (std::optional<RunTimePoolInfo> poolInfo =
- RunTimePoolInfo::createFromHidlMemory(pool.hidlMemory())) {
+ RunTimePoolInfo::createFromMemory(std::get<Memory>(pool))) {
poolInfos->push_back(*poolInfo);
} else {
LOG(ERROR) << "Could not map pools";
@@ -522,7 +522,7 @@ static bool convertToNhwc(RunTimeOperandInfo& to, const RunTimeOperandInfo& from
LOG(ERROR) << "Error converting a non-4-D tensor to NHWC layout";
return false;
}
- to.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ to.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
if (data_layout) {
// convert dimensions
Shape inShape = from.shape();
@@ -628,7 +628,7 @@ static void consumeOperationInputs(const std::vector<uint32_t>& inputs,
// that are inputs to an operation.
static void freeUnusedSubgraphOperands(std::vector<RunTimeOperandInfo>* operands) {
for (auto& info : *operands) {
- if (info.lifetime == OperandLifeTime::TEMPORARY_VARIABLE && info.numberOfUsesLeft == 0 &&
+ if (info.lifetime == Operand::LifeTime::TEMPORARY_VARIABLE && info.numberOfUsesLeft == 0 &&
info.buffer != nullptr) {
delete[] info.buffer;
info.buffer = nullptr;
@@ -642,8 +642,8 @@ int CpuExecutor::run(const Model& model, const Request& request,
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos) {
NNTRACE_CPU(NNTRACE_PHASE_EXECUTION, "run");
- VLOG(CPUEXE) << "CpuExecutor::run() with request(" << SHOW_IF_DEBUG(toString(request)) << ")";
- mModelOperandValues = &model.operandValues;
+ VLOG(CPUEXE) << "CpuExecutor::run() with request(" << SHOW_IF_DEBUG(request) << ")";
+ mModelOperandValues = model.operandValues.data();
mModelPoolInfos = &modelPoolInfos;
mReferencedSubgraphs = &model.referenced;
@@ -680,8 +680,8 @@ int CpuExecutor::run(const Model& model, const Request& request,
return result;
}
-int CpuExecutor::executeSubgraph(const Subgraph& subgraph, RunTimeOperandInfo* operands) {
- VLOG(CPUEXE) << "CpuExecutor::executeSubgraph " << toString(subgraph);
+int CpuExecutor::executeSubgraph(const Model::Subgraph& subgraph, RunTimeOperandInfo* operands) {
+ VLOG(CPUEXE) << "CpuExecutor::executeSubgraph " << subgraph;
// The graph has serialized the operation in execution order.
for (const auto& operation : subgraph.operations) {
NN_RETURN_IF_ERROR(executeOperation(operation, operands));
@@ -689,10 +689,12 @@ int CpuExecutor::executeSubgraph(const Subgraph& subgraph, RunTimeOperandInfo* o
return ANEURALNETWORKS_NO_ERROR;
}
-std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(const Subgraph& subgraph) {
+std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(
+ const Model::Subgraph& subgraph) {
VLOG(CPUEXE) << "CpuExecutor::initializeRunTimeInfo";
const size_t count = subgraph.operands.size();
std::vector<RunTimeOperandInfo> operands(count);
+ std::vector<uint32_t> numberOfConsumers = countNumberOfConsumers(count, subgraph.operations);
for (size_t i = 0; i < count; i++) {
const Operand& from = subgraph.operands[i];
RunTimeOperandInfo& to = operands[i];
@@ -704,15 +706,15 @@ std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(const Subgrap
to.lifetime = from.lifetime;
to.extraParams = from.extraParams;
switch (from.lifetime) {
- case OperandLifeTime::TEMPORARY_VARIABLE:
+ case Operand::LifeTime::TEMPORARY_VARIABLE:
to.buffer = nullptr;
- to.numberOfUsesLeft = from.numberOfConsumers;
+ to.numberOfUsesLeft = numberOfConsumers[i];
break;
- case OperandLifeTime::CONSTANT_COPY:
- to.buffer = const_cast<uint8_t*>(&(*mModelOperandValues)[from.location.offset]);
+ case Operand::LifeTime::CONSTANT_COPY:
+ to.buffer = const_cast<uint8_t*>(mModelOperandValues + from.location.offset);
to.numberOfUsesLeft = 0;
break;
- case OperandLifeTime::CONSTANT_REFERENCE: {
+ case Operand::LifeTime::CONSTANT_REFERENCE: {
auto poolIndex = from.location.poolIndex;
CHECK_LT(poolIndex, mModelPoolInfos->size());
auto& r = (*mModelPoolInfos)[poolIndex];
@@ -720,16 +722,21 @@ std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(const Subgrap
to.numberOfUsesLeft = 0;
break;
}
- case OperandLifeTime::SUBGRAPH: {
+ case Operand::LifeTime::SUBGRAPH: {
auto subgraphIndex = from.location.offset;
CHECK_LT(subgraphIndex, mReferencedSubgraphs->size());
to.buffer = reinterpret_cast<uint8_t*>(
- const_cast<Subgraph*>(&(*mReferencedSubgraphs)[subgraphIndex]));
+ const_cast<Model::Subgraph*>(&(*mReferencedSubgraphs)[subgraphIndex]));
+ to.numberOfUsesLeft = 0;
+ } break;
+ case Operand::LifeTime::POINTER: {
+ to.buffer = reinterpret_cast<uint8_t*>(
+ const_cast<void*>(std::get<const void*>(from.location.pointer)));
to.numberOfUsesLeft = 0;
} break;
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
- case OperandLifeTime::NO_VALUE:
+ case Operand::LifeTime::SUBGRAPH_INPUT:
+ case Operand::LifeTime::SUBGRAPH_OUTPUT:
+ case Operand::LifeTime::NO_VALUE:
to.buffer = nullptr;
to.numberOfUsesLeft = 0;
break;
@@ -739,15 +746,15 @@ std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(const Subgrap
}
void CpuExecutor::updateForArguments(const std::vector<uint32_t>& indexes,
- const hal::hidl_vec<hal::RequestArgument>& arguments,
+ const std::vector<Request::Argument>& arguments,
const std::vector<RunTimePoolInfo>& requestPoolInfos,
RunTimeOperandInfo* operands) {
CHECK_EQ(indexes.size(), arguments.size());
for (size_t i = 0; i < indexes.size(); i++) {
const uint32_t operandIndex = indexes[i];
- const RequestArgument& from = arguments[i];
+ const Request::Argument& from = arguments[i];
RunTimeOperandInfo& to = operands[operandIndex];
- if (from.dimensions.size() > 0) {
+ if (!from.dimensions.empty()) {
// It's the responsibility of the caller to validate that
// from.dimensions only modifies the dimensions that were
// unspecified in the model. That's the case in SampleDriver.cpp
@@ -755,8 +762,8 @@ void CpuExecutor::updateForArguments(const std::vector<uint32_t>& indexes,
// TODO make sure that's the case for the default CPU path.
to.dimensions = from.dimensions;
}
- if (from.hasNoValue) {
- to.lifetime = OperandLifeTime::NO_VALUE;
+ if (from.lifetime == Request::Argument::LifeTime::NO_VALUE) {
+ to.lifetime = Operand::LifeTime::NO_VALUE;
CHECK(to.buffer == nullptr);
to.length = 0;
} else {
@@ -793,9 +800,9 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
return result;
}
- // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << toString(operation) << ")";
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << operation << ")";
+ const std::vector<uint32_t>& ins = operation.inputs;
+ const std::vector<uint32_t>& outs = operation.outputs;
bool success = false;
int result = ANEURALNETWORKS_NO_ERROR;
@@ -807,29 +814,30 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
auto allParametersPresent = [&operation, &operands, &ins, &outs](size_t requiredIns,
size_t requiredOuts) -> bool {
auto verify = [&operation, &operands](size_t requiredCount,
- const hidl_vec<uint32_t>& indexes,
+ const std::vector<uint32_t>& indexes,
const char* type) -> bool {
size_t actualCount = indexes.size();
if (actualCount != requiredCount) {
- LOG(ERROR) << getOperationName(operation.type) << ": Invalid number of " << type
- << " operands. Got " << actualCount << " of " << requiredCount;
+ LOG(ERROR) << operation.type << ": Invalid number of " << type << " operands. Got "
+ << actualCount << " of " << requiredCount;
return false;
}
for (size_t i = 0; i < actualCount; i++) {
- if (operands[indexes[i]].lifetime == OperandLifeTime::NO_VALUE) {
- LOG(ERROR) << getOperationName(operation.type) << " " << type << " operand "
- << i << " is required but missing.";
+ if (operands[indexes[i]].lifetime == Operand::LifeTime::NO_VALUE) {
+ LOG(ERROR) << operation.type << " " << type << " operand " << i
+ << " is required but missing.";
return false;
}
}
return true;
};
- auto verifyNoZeroSizedInputs = [&operation, &operands](const hidl_vec<uint32_t>& indexes) {
+ auto verifyNoZeroSizedInputs = [&operation,
+ &operands](const std::vector<uint32_t>& indexes) {
for (size_t i = 0; i < indexes.size(); i++) {
for (size_t j = 0; j < operands[indexes[i]].dimensions.size(); j++) {
if (operands[indexes[i]].dimensions[j] == 0) {
- LOG(ERROR) << getOperationName(operation.type)
+ LOG(ERROR) << operation.type
<< " does not support zero-sized tensor, but input " << i
<< " dimension " << j << " is zero.";
return false;
@@ -882,7 +890,7 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
if (!depthToSpacePrepare(input_tmp.shape(), blockSize, &outShape) ||
@@ -946,7 +954,7 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1167,7 +1175,7 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1239,7 +1247,7 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1558,7 +1566,7 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = false;
break;
}
- output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE;
output_tmp.buffer = data_layout ? nullptr : output.buffer;
output_tmp.length = data_layout ? 0 : output.length;
@@ -1605,7 +1613,8 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = groupedConvQuant8PerChannel(
reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(),
reinterpret_cast<const int8_t*>(filter.buffer), filter.shape(),
- filter.extraParams.channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(filter.extraParams)
+ .scales.data(),
reinterpret_cast<const int32_t*>(bias.buffer), bias.shape(),
padding_left, padding_right, padding_top, padding_bottom, stride_width,
stride_height, numGroups, activation,
@@ -1624,7 +1633,8 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
success = groupedConvQuant8PerChannel(
reinterpret_cast<const int8_t*>(input_tmp.buffer), input_tmp.shape(),
reinterpret_cast<const int8_t*>(filter.buffer), filter.shape(),
- filter.extraParams.channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(filter.extraParams)
+ .scales.data(),
reinterpret_cast<const int32_t*>(bias.buffer), bias.shape(),
padding_left, padding_right, padding_top, padding_bottom, stride_width,
stride_height, numGroups, activation,
@@ -1703,11 +1713,10 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
const OperationRegistration* operationRegistration =
mOperationResolver->findOperation(operation.type);
if (operationRegistration == nullptr) {
- LOG(ERROR) << getOperationName(operation.type) << " not registered";
+ LOG(ERROR) << operation.type << " not registered";
} else if (operationRegistration->prepare == nullptr ||
operationRegistration->execute == nullptr) {
- LOG(ERROR) << "Incomplete operation registration: "
- << getOperationName(operation.type);
+ LOG(ERROR) << "Incomplete operation registration: " << operation.type;
} else {
OperationExecutionContext context(&operation, operands);
success = operationRegistration->flags.allowOmittedOperand ||
@@ -1724,7 +1733,7 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
result = ANEURALNETWORKS_OP_FAILED;
}
if (result != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << getOperationName(operation.type) << " failed.";
+ LOG(ERROR) << operation.type << " failed.";
}
consumeOperationInputs(ins, operands);
@@ -1753,7 +1762,8 @@ int CpuExecutor::executeIfOperation(const Operation& operation, RunTimeOperandIn
const uint32_t branchInputIndex = condValue ? op::kThenModelOperand : op::kElseModelOperand;
const RunTimeOperandInfo& branchOperand = operands[operation.inputs[branchInputIndex]];
- const Subgraph& branchSubgraph = *reinterpret_cast<const Subgraph*>(branchOperand.buffer);
+ const Model::Subgraph& branchSubgraph =
+ *reinterpret_cast<const Model::Subgraph*>(branchOperand.buffer);
std::vector<RunTimeOperandInfo> branchOperands = initializeRunTimeInfo(branchSubgraph);
// Initialize inner input and output operands from outer operands.
@@ -1783,8 +1793,10 @@ int CpuExecutor::executeWhileOperation(const Operation& operation, RunTimeOperan
namespace op = operation_while;
const RunTimeOperandInfo& condModelOperand = operands[operation.inputs[op::kCondModelOperand]];
const RunTimeOperandInfo& bodyModelOperand = operands[operation.inputs[op::kBodyModelOperand]];
- const Subgraph& condSubgraph = *reinterpret_cast<const Subgraph*>(condModelOperand.buffer);
- const Subgraph& bodySubgraph = *reinterpret_cast<const Subgraph*>(bodyModelOperand.buffer);
+ const Model::Subgraph& condSubgraph =
+ *reinterpret_cast<const Model::Subgraph*>(condModelOperand.buffer);
+ const Model::Subgraph& bodySubgraph =
+ *reinterpret_cast<const Model::Subgraph*>(bodyModelOperand.buffer);
std::vector<RunTimeOperandInfo> condOperands = initializeRunTimeInfo(condSubgraph);
std::vector<RunTimeOperandInfo> bodyOperands = initializeRunTimeInfo(bodySubgraph);
@@ -1916,7 +1928,7 @@ void CpuExecutor::setOutputShapes(const std::vector<uint32_t>& outputIndexes,
mOutputShapes[i].dimensions = from.dimensions;
mOutputShapes[i].isSufficient = from.isSufficient();
VLOG(EXECUTION) << "CpuExecutor::setOutputShapes: mOutputShapes[" << i
- << "] = " << toString(mOutputShapes[i]);
+ << "] = " << mOutputShapes[i];
}
}
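One structural change in `initializeRunTimeInfo` above: the canonical `Operand` no longer carries a stored `numberOfConsumers` field, so the count is recomputed from the operations via `countNumberOfConsumers`. A plausible, self-contained sketch of what such a helper computes (toy `Operation` type; the real helper lives in the canonical type utilities):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

struct Operation {
    std::vector<uint32_t> inputs;
    std::vector<uint32_t> outputs;
};

// For each operand index, count how many operation inputs reference it.
std::vector<uint32_t> countNumberOfConsumers(size_t numOperands,
                                             const std::vector<Operation>& operations) {
    std::vector<uint32_t> counts(numOperands, 0);
    for (const Operation& operation : operations) {
        for (uint32_t operandIndex : operation.inputs) {
            ++counts[operandIndex];  // each input reference is one consumer
        }
    }
    return counts;
}

int main() {
    // Operand 0 feeds both operations; operand 1 feeds only the second;
    // operand 2 is a pure output.
    std::vector<Operation> ops = {{{0}, {1}}, {{0, 1}, {2}}};
    for (uint32_t c : countNumberOfConsumers(3, ops)) std::cout << c << ' ';
    std::cout << '\n';  // prints: 2 1 0
}
```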
diff --git a/nn/common/ExecutionBurstController.cpp b/nn/common/ExecutionBurstController.cpp
index e195e7d31..bb1c08f6b 100644
--- a/nn/common/ExecutionBurstController.cpp
+++ b/nn/common/ExecutionBurstController.cpp
@@ -36,8 +36,6 @@
namespace android::nn {
namespace {
-using namespace hal;
-
using V1_2::FmqRequestDatum;
using V1_2::FmqResultDatum;
using V1_2::IBurstCallback;
@@ -45,10 +43,10 @@ using V1_2::IBurstContext;
using FmqRequestDescriptor = hardware::MQDescriptorSync<FmqRequestDatum>;
using FmqResultDescriptor = hardware::MQDescriptorSync<FmqResultDatum>;
-constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
- std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming12 = {std::numeric_limits<uint64_t>::max(),
+ std::numeric_limits<uint64_t>::max()};
-class BurstContextDeathHandler : public hidl_death_recipient {
+class BurstContextDeathHandler : public hardware::hidl_death_recipient {
public:
using Callback = std::function<void()>;
@@ -68,7 +66,7 @@ class BurstContextDeathHandler : public hidl_death_recipient {
} // anonymous namespace
// serialize a request into a packet
-std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, MeasureTiming measure,
+std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, V1_2::MeasureTiming measure,
const std::vector<int32_t>& slots) {
// count how many elements need to be sent for a request
size_t count = 2 + request.inputs.size() + request.outputs.size() + request.pools.size();
@@ -149,11 +147,11 @@ std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, MeasureTimi
}
// deserialize a packet into the result
-std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>> deserialize(
- const std::vector<FmqResultDatum>& data) {
+std::optional<std::tuple<V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, V1_2::Timing>>
+deserialize(const std::vector<FmqResultDatum>& data) {
using discriminator = FmqResultDatum::hidl_discriminator;
- std::vector<OutputShape> outputShapes;
+ std::vector<V1_2::OutputShape> outputShapes;
size_t index = 0;
// validate packet information
@@ -218,7 +216,7 @@ std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>> d
}
// unpackage execution timing
- const Timing timing = data[index].executionTiming();
+ const V1_2::Timing timing = data[index].executionTiming();
index++;
// validate packet information
@@ -254,7 +252,7 @@ ResultChannelReceiver::ResultChannelReceiver(std::unique_ptr<FmqResultChannel> f
std::chrono::microseconds pollingTimeWindow)
: mFmqResultChannel(std::move(fmqResultChannel)), kPollingTimeWindow(pollingTimeWindow) {}
-std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>>
+std::optional<std::tuple<V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, V1_2::Timing>>
ResultChannelReceiver::getBlocking() {
const auto packet = getPacketBlocking();
if (!packet) {
@@ -275,7 +273,8 @@ void ResultChannelReceiver::invalidate() {
// TODO: look for a different/better way to signal/notify the futex to
// wake up any thread waiting on it
FmqResultDatum datum;
- datum.packetInformation({/*.packetSize=*/0, /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE,
+ datum.packetInformation({/*.packetSize=*/0,
+ /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE,
/*.numberOfOperands=*/0});
mFmqResultChannel->writeBlocking(&datum, 1);
}
@@ -363,7 +362,7 @@ RequestChannelSender::create(size_t channelLength) {
RequestChannelSender::RequestChannelSender(std::unique_ptr<FmqRequestChannel> fmqRequestChannel)
: mFmqRequestChannel(std::move(fmqRequestChannel)) {}
-bool RequestChannelSender::send(const V1_0::Request& request, MeasureTiming measure,
+bool RequestChannelSender::send(const V1_0::Request& request, V1_2::MeasureTiming measure,
const std::vector<int32_t>& slots) {
const std::vector<FmqRequestDatum> serialized = serialize(request, measure, slots);
return sendPacket(serialized);
@@ -389,30 +388,31 @@ void RequestChannelSender::invalidate() {
mValid = false;
}
-Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
- const hidl_vec<int32_t>& slots, getMemories_cb cb) {
+hardware::Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
+ const hardware::hidl_vec<int32_t>& slots, getMemories_cb cb) {
std::lock_guard<std::mutex> guard(mMutex);
// get all memories
- hidl_vec<hidl_memory> memories(slots.size());
+ hardware::hidl_vec<hardware::hidl_memory> memories(slots.size());
std::transform(slots.begin(), slots.end(), memories.begin(), [this](int32_t slot) {
- return slot < mMemoryCache.size() ? mMemoryCache[slot] : hidl_memory{};
+ return slot < mMemoryCache.size() ? mMemoryCache[slot] : hardware::hidl_memory{};
});
// ensure all memories are valid
if (!std::all_of(memories.begin(), memories.end(),
- [](const hidl_memory& memory) { return memory.valid(); })) {
+ [](const hardware::hidl_memory& memory) { return memory.valid(); })) {
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
// return successful
cb(V1_0::ErrorStatus::NONE, std::move(memories));
- return Void();
+ return hardware::Void();
}
std::vector<int32_t> ExecutionBurstController::ExecutionBurstCallback::getSlots(
- const hidl_vec<hidl_memory>& memories, const std::vector<intptr_t>& keys) {
+ const hardware::hidl_vec<hardware::hidl_memory>& memories,
+ const std::vector<intptr_t>& keys) {
std::lock_guard<std::mutex> guard(mMutex);
// retrieve (or bind) all slots corresponding to memories
@@ -439,8 +439,8 @@ std::pair<bool, int32_t> ExecutionBurstController::ExecutionBurstCallback::freeM
return {true, slot};
}
-int32_t ExecutionBurstController::ExecutionBurstCallback::getSlotLocked(const hidl_memory& memory,
- intptr_t key) {
+int32_t ExecutionBurstController::ExecutionBurstCallback::getSlotLocked(
+ const hardware::hidl_memory& memory, intptr_t key) {
auto iter = mMemoryIdToSlot.find(key);
if (iter == mMemoryIdToSlot.end()) {
const int32_t slot = allocateSlotLocked();
@@ -503,7 +503,7 @@ std::unique_ptr<ExecutionBurstController> ExecutionBurstController::create(
// configure burst
V1_0::ErrorStatus errorStatus;
sp<IBurstContext> burstContext;
- const Return<void> ret = preparedModel->configureExecutionBurst(
+ const hardware::Return<void> ret = preparedModel->configureExecutionBurst(
callback, *requestChannelDescriptor, *resultChannelDescriptor,
[&errorStatus, &burstContext](V1_0::ErrorStatus status,
const sp<IBurstContext>& context) {
@@ -539,7 +539,7 @@ std::unique_ptr<ExecutionBurstController> ExecutionBurstController::create(
// proactively handle service crashes. If the linkToDeath call fails,
// asynchronous calls are susceptible to hangs if the service crashes before
// providing the response.
- const Return<bool> deathHandlerRet = burstContext->linkToDeath(deathHandler, 0);
+ const hardware::Return<bool> deathHandlerRet = burstContext->linkToDeath(deathHandler, 0);
if (!deathHandlerRet.isOk() || deathHandlerRet != true) {
LOG(ERROR) << "ExecutionBurstController::create -- Failed to register a death recipient "
"for the IBurstContext object.";
@@ -555,7 +555,7 @@ ExecutionBurstController::ExecutionBurstController(
const std::shared_ptr<RequestChannelSender>& requestChannelSender,
const std::shared_ptr<ResultChannelReceiver>& resultChannelReceiver,
const sp<IBurstContext>& burstContext, const sp<ExecutionBurstCallback>& callback,
- const sp<hidl_death_recipient>& deathHandler)
+ const sp<hardware::hidl_death_recipient>& deathHandler)
: mRequestChannelSender(requestChannelSender),
mResultChannelReceiver(resultChannelReceiver),
mBurstContext(burstContext),
@@ -572,17 +572,17 @@ ExecutionBurstController::~ExecutionBurstController() {
}
}
-static std::tuple<int, std::vector<OutputShape>, Timing, bool> getExecutionResult(
- V1_0::ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing,
+static std::tuple<int, std::vector<V1_2::OutputShape>, V1_2::Timing, bool> getExecutionResult(
+ V1_0::ErrorStatus status, std::vector<V1_2::OutputShape> outputShapes, V1_2::Timing timing,
bool fallback) {
auto [n, checkedOutputShapes, checkedTiming] =
getExecutionResult(convertToV1_3(status), std::move(outputShapes), timing);
- return {n, std::move(checkedOutputShapes), checkedTiming, fallback};
+ return {n, convertToV1_2(checkedOutputShapes), convertToV1_2(checkedTiming), fallback};
}
-std::tuple<int, std::vector<OutputShape>, Timing, bool> ExecutionBurstController::compute(
- const V1_0::Request& request, MeasureTiming measure,
- const std::vector<intptr_t>& memoryIds) {
+std::tuple<int, std::vector<V1_2::OutputShape>, V1_2::Timing, bool>
+ExecutionBurstController::compute(const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const std::vector<intptr_t>& memoryIds) {
// This is the first point when we know an execution is occurring, so begin
// to collect systraces. Note that the first point we can begin collecting
// systraces in ExecutionBurstServer is when the RequestChannelReceiver
@@ -598,7 +598,7 @@ std::tuple<int, std::vector<OutputShape>, Timing, bool> ExecutionBurstController
if (!success) {
LOG(ERROR) << "Error sending FMQ packet";
// only use fallback execution path if the packet could not be sent
- return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming,
+ return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12,
/*fallback=*/true);
}
@@ -607,7 +607,7 @@ std::tuple<int, std::vector<OutputShape>, Timing, bool> ExecutionBurstController
if (!result) {
LOG(ERROR) << "Error retrieving FMQ packet";
// only use fallback execution path if the packet could not be sent
- return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming,
+ return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12,
/*fallback=*/false);
}
diff --git a/nn/common/ExecutionBurstServer.cpp b/nn/common/ExecutionBurstServer.cpp
index 487cd9fad..67d4ccbda 100644
--- a/nn/common/ExecutionBurstServer.cpp
+++ b/nn/common/ExecutionBurstServer.cpp
@@ -35,16 +35,14 @@
namespace android::nn {
namespace {
-using namespace hal;
-
using hardware::MQDescriptorSync;
using V1_2::FmqRequestDatum;
using V1_2::FmqResultDatum;
using V1_2::IBurstCallback;
using V1_2::IBurstContext;
-constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
- std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
+ std::numeric_limits<uint64_t>::max()};
// DefaultBurstExecutorWithCache adapts an IPreparedModel so that it can be
// used as an IBurstExecutorWithCache. Specifically, the cache simply stores the
@@ -61,17 +59,17 @@ class DefaultBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecuto
return (it != mMemoryCache.end()) && it->second.valid();
}
- void addCacheEntry(const hidl_memory& memory, int32_t slot) override {
+ void addCacheEntry(const hardware::hidl_memory& memory, int32_t slot) override {
mMemoryCache[slot] = memory;
}
void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); }
- std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
+ std::tuple<V1_0::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> execute(
const V1_0::Request& request, const std::vector<int32_t>& slots,
- MeasureTiming measure) override {
+ V1_2::MeasureTiming measure) override {
// convert slots to pools
- hidl_vec<hidl_memory> pools(slots.size());
+ hardware::hidl_vec<hardware::hidl_memory> pools(slots.size());
std::transform(slots.begin(), slots.end(), pools.begin(),
[this](int32_t slot) { return mMemoryCache[slot]; });
@@ -81,18 +79,20 @@ class DefaultBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecuto
// setup execution
V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- hidl_vec<OutputShape> returnedOutputShapes;
- Timing returnedTiming;
+ hardware::hidl_vec<V1_2::OutputShape> returnedOutputShapes;
+ V1_2::Timing returnedTiming;
auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](
- V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
+ V1_0::ErrorStatus status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
returnedStatus = status;
returnedOutputShapes = outputShapes;
returnedTiming = timing;
};
// execute
- const Return<void> ret = mpPreparedModel->executeSynchronously(fullRequest, measure, cb);
+ const hardware::Return<void> ret =
+ mpPreparedModel->executeSynchronously(fullRequest, measure, cb);
if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE) {
LOG(ERROR) << "IPreparedModelAdapter::execute -- Error executing";
return {returnedStatus, std::move(returnedOutputShapes), kNoTiming};
@@ -103,14 +103,15 @@ class DefaultBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecuto
private:
V1_2::IPreparedModel* const mpPreparedModel;
- std::map<int32_t, hidl_memory> mMemoryCache;
+ std::map<int32_t, hardware::hidl_memory> mMemoryCache;
};
} // anonymous namespace
// serialize result
std::vector<FmqResultDatum> serialize(V1_0::ErrorStatus errorStatus,
- const std::vector<OutputShape>& outputShapes, Timing timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
// count how many elements need to be sent for a request
size_t count = 2 + outputShapes.size();
for (const auto& outputShape : outputShapes) {
@@ -161,7 +162,7 @@ std::vector<FmqResultDatum> serialize(V1_0::ErrorStatus errorStatus,
}
// deserialize request
-std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> deserialize(
+std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, V1_2::MeasureTiming>> deserialize(
const std::vector<FmqRequestDatum>& data) {
using discriminator = FmqRequestDatum::hidl_discriminator;
@@ -188,7 +189,7 @@ std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> de
}
// unpackage input operands
- std::vector<RequestArgument> inputs;
+ std::vector<V1_0::RequestArgument> inputs;
inputs.reserve(numberOfInputOperands);
for (size_t operand = 0; operand < numberOfInputOperands; ++operand) {
// validate input operand information
@@ -202,7 +203,7 @@ std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> de
data[index].inputOperandInformation();
index++;
const bool hasNoValue = operandInfo.hasNoValue;
- const DataLocation location = operandInfo.location;
+ const V1_0::DataLocation location = operandInfo.location;
const uint32_t numberOfDimensions = operandInfo.numberOfDimensions;
// unpackage operand dimensions
@@ -229,7 +230,7 @@ std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> de
}
// unpackage output operands
- std::vector<RequestArgument> outputs;
+ std::vector<V1_0::RequestArgument> outputs;
outputs.reserve(numberOfOutputOperands);
for (size_t operand = 0; operand < numberOfOutputOperands; ++operand) {
// validate output operand information
@@ -243,7 +244,7 @@ std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> de
data[index].outputOperandInformation();
index++;
const bool hasNoValue = operandInfo.hasNoValue;
- const DataLocation location = operandInfo.location;
+ const V1_0::DataLocation location = operandInfo.location;
const uint32_t numberOfDimensions = operandInfo.numberOfDimensions;
// unpackage operand dimensions
@@ -294,7 +295,7 @@ std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> de
}
// unpackage measureTiming
- const MeasureTiming measure = data[index].measureTiming();
+ const V1_2::MeasureTiming measure = data[index].measureTiming();
index++;
// validate packet information
@@ -333,7 +334,7 @@ RequestChannelReceiver::RequestChannelReceiver(std::unique_ptr<FmqRequestChannel
std::chrono::microseconds pollingTimeWindow)
: mFmqRequestChannel(std::move(fmqRequestChannel)), kPollingTimeWindow(pollingTimeWindow) {}
-std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>>
+std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, V1_2::MeasureTiming>>
RequestChannelReceiver::getBlocking() {
const auto packet = getPacketBlocking();
if (!packet) {
@@ -463,7 +464,8 @@ ResultChannelSender::ResultChannelSender(std::unique_ptr<FmqResultChannel> fmqRe
: mFmqResultChannel(std::move(fmqResultChannel)) {}
bool ResultChannelSender::send(V1_0::ErrorStatus errorStatus,
- const std::vector<OutputShape>& outputShapes, Timing timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
const std::vector<FmqResultDatum> serialized = serialize(errorStatus, outputShapes, timing);
return sendPacket(serialized);
}
@@ -555,10 +557,10 @@ ExecutionBurstServer::~ExecutionBurstServer() {
mWorker.join();
}
-Return<void> ExecutionBurstServer::freeMemory(int32_t slot) {
+hardware::Return<void> ExecutionBurstServer::freeMemory(int32_t slot) {
std::lock_guard<std::mutex> hold(mMutex);
mExecutorWithCache->removeCacheEntry(slot);
- return Void();
+ return hardware::Void();
}
void ExecutionBurstServer::ensureCacheEntriesArePresentLocked(const std::vector<int32_t>& slots) {
@@ -580,14 +582,15 @@ void ExecutionBurstServer::ensureCacheEntriesArePresentLocked(const std::vector<
}
V1_0::ErrorStatus errorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
- std::vector<hidl_memory> returnedMemories;
- auto cb = [&errorStatus, &returnedMemories](V1_0::ErrorStatus status,
- const hidl_vec<hidl_memory>& memories) {
+ std::vector<hardware::hidl_memory> returnedMemories;
+ auto cb = [&errorStatus, &returnedMemories](
+ V1_0::ErrorStatus status,
+ const hardware::hidl_vec<hardware::hidl_memory>& memories) {
errorStatus = status;
returnedMemories = memories;
};
- const Return<void> ret = mCallback->getMemories(unknownSlots, cb);
+ const hardware::Return<void> ret = mCallback->getMemories(unknownSlots, cb);
if (!ret.isOk() || errorStatus != V1_0::ErrorStatus::NONE ||
returnedMemories.size() != unknownSlots.size()) {
diff --git a/nn/common/GraphDump.cpp b/nn/common/GraphDump.cpp
index 3c208cd88..146e1c6cb 100644
--- a/nn/common/GraphDump.cpp
+++ b/nn/common/GraphDump.cpp
@@ -18,9 +18,8 @@
#include "GraphDump.h"
-#include "HalInterfaces.h"
-
#include <android-base/logging.h>
+
#include <algorithm>
#include <iostream>
#include <map>
@@ -28,11 +27,11 @@
#include <string>
#include <utility>
+#include "Utils.h"
+
namespace android {
namespace nn {
-using namespace hal;
-
// class Dumper is a wrapper around an std::ostream (if instantiated
// with a pointer to a stream) or around LOG(INFO) (otherwise).
//
@@ -112,25 +111,40 @@ static std::string translate(OperandType type) {
return "OEM";
case OperandType::TENSOR_OEM_BYTE:
return "TOEMB";
- default:
- return toString(type);
+ default: {
+ std::ostringstream oss;
+ oss << type;
+ return oss.str();
+ }
}
}
// If the specified Operand of the specified Model has OperandType
// nnType corresponding to C++ type cppType and is of
-// OperandLifeTime::CONSTANT_COPY, then write the Operand's value to
+// Operand::LifeTime::CONSTANT_COPY, then write the Operand's value to
// the Dumper.
namespace {
template <OperandType nnType, typename cppType>
void tryValueDump(Dumper& dump, const Model& model, const Operand& opnd) {
- if (opnd.type != nnType || opnd.lifetime != OperandLifeTime::CONSTANT_COPY ||
- opnd.location.length != sizeof(cppType)) {
+ if (opnd.type != nnType) {
+ return;
+ }
+
+ const void* pointer = nullptr;
+ if (opnd.lifetime == Operand::LifeTime::CONSTANT_COPY) {
+ pointer = model.operandValues.data() + opnd.location.offset;
+ } else if (opnd.lifetime == Operand::LifeTime::POINTER) {
+ pointer = std::get<const void*>(opnd.location.pointer);
+ } else {
+ return;
+ }
+
+ if (opnd.location.length != sizeof(cppType)) {
return;
}
cppType val;
- memcpy(&val, &model.operandValues[opnd.location.offset], sizeof(cppType));
+ memcpy(&val, pointer, sizeof(cppType));
dump << " = " << val;
}
} // namespace
@@ -172,25 +186,28 @@ void graphDump(const char* name, const Model& model, std::ostream* outStream) {
const char* kind = nullptr;
const char* io = nullptr;
switch (opnd.lifetime) {
- case OperandLifeTime::CONSTANT_COPY:
+ case Operand::LifeTime::CONSTANT_COPY:
kind = "COPY";
break;
- case OperandLifeTime::CONSTANT_REFERENCE:
+ case Operand::LifeTime::CONSTANT_REFERENCE:
kind = "REF";
break;
- case OperandLifeTime::SUBGRAPH_INPUT:
+ case Operand::LifeTime::SUBGRAPH_INPUT:
io = "input";
break;
- case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case Operand::LifeTime::SUBGRAPH_OUTPUT:
io = "output";
break;
- case OperandLifeTime::NO_VALUE:
+ case Operand::LifeTime::NO_VALUE:
kind = "NO";
break;
- case OperandLifeTime::SUBGRAPH:
+ case Operand::LifeTime::SUBGRAPH:
kind = "SUBGRAPH";
break;
- default:
+ case Operand::LifeTime::POINTER:
+ kind = "POINTER";
+ break;
+ case Operand::LifeTime::TEMPORARY_VARIABLE:
// nothing interesting
break;
}
@@ -205,7 +222,7 @@ void graphDump(const char* name, const Model& model, std::ostream* outStream) {
tryValueDump<OperandType::FLOAT32, float>(dump, model, opnd);
tryValueDump<OperandType::INT32, int>(dump, model, opnd);
tryValueDump<OperandType::UINT32, unsigned>(dump, model, opnd);
- if (opnd.dimensions.size()) {
+ if (!opnd.dimensions.empty()) {
dump << "(";
for (unsigned i = 0, e = opnd.dimensions.size(); i < e; i++) {
if (i > 0) {
@@ -230,7 +247,7 @@ void graphDump(const char* name, const Model& model, std::ostream* outStream) {
dump << " ordering=out";
}
}
- dump << " label=\"" << i << ": " << toString(operation.type) << "\"]" << Dumper::endl;
+ dump << " label=\"" << i << ": " << operation.type << "\"]" << Dumper::endl;
{
// operation inputs
for (unsigned in = 0, inE = operation.inputs.size(); in < inE; in++) {
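The `tryValueDump` change above now recognizes two constant-storage modes: `CONSTANT_COPY` (bytes inlined in `model.operandValues` at an offset) and the new `POINTER` lifetime (bytes behind a raw pointer). A self-contained sketch of that dispatch, with toy types standing in for the canonical ones:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <variant>
#include <vector>

struct Operand {
    enum class LifeTime { CONSTANT_COPY, POINTER, NO_VALUE };
    struct Location {
        uint32_t offset = 0;
        uint32_t length = 0;
        std::variant<std::monostate, const void*> pointer;
    };
    LifeTime lifetime = LifeTime::NO_VALUE;
    Location location;
};

struct Model {
    std::vector<uint8_t> operandValues;
};

// Resolve where a constant operand's bytes live, mirroring the two branches
// added to tryValueDump; returns nullptr for non-dumpable lifetimes.
const void* constantPointer(const Model& model, const Operand& opnd) {
    if (opnd.lifetime == Operand::LifeTime::CONSTANT_COPY) {
        return model.operandValues.data() + opnd.location.offset;
    }
    if (opnd.lifetime == Operand::LifeTime::POINTER) {
        return std::get<const void*>(opnd.location.pointer);
    }
    return nullptr;
}

int main() {
    Model model;
    int32_t value = 42;
    model.operandValues.resize(sizeof(value));
    std::memcpy(model.operandValues.data(), &value, sizeof(value));

    Operand opnd;
    opnd.lifetime = Operand::LifeTime::CONSTANT_COPY;
    opnd.location.length = sizeof(value);

    int32_t out;
    std::memcpy(&out, constantPointer(model, opnd), sizeof(out));
    std::cout << out << '\n';  // 42
}
```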
diff --git a/nn/common/MetaModel.cpp b/nn/common/MetaModel.cpp
index 30d88a181..81d12829d 100644
--- a/nn/common/MetaModel.cpp
+++ b/nn/common/MetaModel.cpp
@@ -24,6 +24,7 @@
#include <sstream>
#include <type_traits>
#include <utility>
+#include <vector>
#include "GraphDump.h"
#include "HalInterfaces.h"
@@ -31,14 +32,12 @@
namespace android::nn {
-using namespace hal;
-
namespace {
// Add an element to the end of the vector and return a pair consisting of the
// index of the new element and a pointer to the new element.
template <class T>
-std::pair<uint32_t, T*> extend(hidl_vec<T>* vec) {
+std::pair<uint32_t, T*> extend(hardware::hidl_vec<T>* vec) {
size_t nextIndex = vec->size();
vec->resize(nextIndex + 1);
return {nextIndex, &(*vec)[nextIndex]};
@@ -48,14 +47,14 @@ std::pair<uint32_t, T*> extend(hidl_vec<T>* vec) {
// return a pair consisting of the index of the new element and a pointer to the
// new element.
template <class T>
-std::pair<uint32_t, T*> extend(hidl_vec<T>* vec, const T& val) {
+std::pair<uint32_t, T*> extend(hardware::hidl_vec<T>* vec, const T& val) {
auto extended = extend(vec);
*extended.second = val;
return extended;
}
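
A usage sketch for the two extend() overloads (values are illustrative):

    hardware::hidl_vec<uint32_t> indexes;
    auto [i, slot] = extend(&indexes);         // grow by one; get index and pointer
    *slot = 7;
    auto [j, unused] = extend(&indexes, 42u);  // grow and initialize in one call

Note that the returned pointer is only valid until the next extend(), since resizing a hidl_vec may reallocate its storage.
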
template <typename T>
-bool operator<(const hidl_vec<T>& a, const hidl_vec<T>& b) {
+bool operator<(const hardware::hidl_vec<T>& a, const hardware::hidl_vec<T>& b) {
return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
}
@@ -63,19 +62,19 @@ bool operator<(const hidl_vec<T>& a, const hidl_vec<T>& b) {
template <class T_Model>
struct ModelVersion;
template <>
-struct ModelVersion<hal::V1_0::Model> {
+struct ModelVersion<V1_0::Model> {
static constexpr char name[] = "V1_0";
};
template <>
-struct ModelVersion<hal::V1_1::Model> {
+struct ModelVersion<V1_1::Model> {
static constexpr char name[] = "V1_1";
};
template <>
-struct ModelVersion<hal::V1_2::Model> {
+struct ModelVersion<V1_2::Model> {
static constexpr char name[] = "V1_2";
};
template <>
-struct ModelVersion<hal::V1_3::Model> {
+struct ModelVersion<V1_3::Model> {
static constexpr char name[] = "V1_3";
};
@@ -84,16 +83,16 @@ struct ModelVersion<hal::V1_3::Model> {
template <typename T_ReturnType>
T_ReturnType uncheckedConvertTo(OperationType type);
template <>
-hal::V1_0::OperationType uncheckedConvertTo<hal::V1_0::OperationType>(OperationType type) {
- return uncheckedConvertToV1_0(type);
+V1_0::OperationType uncheckedConvertTo<V1_0::OperationType>(OperationType type) {
+ return uncheckedConvertToV1_0(convertToV1_3(type));
}
template <>
-hal::V1_1::OperationType uncheckedConvertTo<hal::V1_1::OperationType>(OperationType type) {
- return uncheckedConvertToV1_1(type);
+V1_1::OperationType uncheckedConvertTo<V1_1::OperationType>(OperationType type) {
+ return uncheckedConvertToV1_1(convertToV1_3(type));
}
template <>
-hal::V1_2::OperationType uncheckedConvertTo<hal::V1_2::OperationType>(OperationType type) {
- return uncheckedConvertToV1_2(type);
+V1_2::OperationType uncheckedConvertTo<V1_2::OperationType>(OperationType type) {
+ return uncheckedConvertToV1_2(convertToV1_3(type));
}
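
Because OperationType here is now the canonical type rather than a V1_3 alias, each specialization first lifts the value into the newest HAL type and then downcasts. For example (operation chosen arbitrarily):

    // Canonical -> V1_3 -> V1_0, matching the dispatch above.
    V1_0::OperationType op = uncheckedConvertTo<V1_0::OperationType>(OperationType::ADD);
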
// Dispatcher mechanism for calling an appropriate convertToV1_* given the
@@ -101,45 +100,41 @@ hal::V1_2::OperationType uncheckedConvertTo<hal::V1_2::OperationType>(OperationT
template <typename T_ReturnType>
T_ReturnType convertTo(Operand operand);
template <>
-hal::V1_0::Operand convertTo<hal::V1_0::Operand>(Operand operand) {
- return convertToV1_0(operand);
+V1_0::Operand convertTo<V1_0::Operand>(Operand operand) {
+ return convertToV1_0(convertToV1_3(operand));
}
template <>
-hal::V1_2::Operand convertTo<hal::V1_2::Operand>(Operand operand) {
- return convertToV1_2(operand);
+V1_2::Operand convertTo<V1_2::Operand>(Operand operand) {
+ return convertToV1_2(convertToV1_3(operand));
}
// Dispatcher mechanism for calling an appropriate convertToV1_* given the
-// desired return type. Note that there are no V1_[12]::OperandLifeTime types.
+// desired return type. Note that there are no V1_[12]::Operand::LifeTime types.
template <typename T_ReturnType>
-T_ReturnType convertTo(OperandLifeTime lifetime);
+T_ReturnType convertTo(V1_3::OperandLifeTime lifetime);
template <>
-hal::V1_0::OperandLifeTime convertTo<hal::V1_0::OperandLifeTime>(OperandLifeTime lifetime) {
+V1_0::OperandLifeTime convertTo<V1_0::OperandLifeTime>(V1_3::OperandLifeTime lifetime) {
return convertToV1_0(lifetime);
}
-template <>
-hal::V1_3::OperandLifeTime convertTo<hal::V1_3::OperandLifeTime>(OperandLifeTime lifetime) {
- return lifetime;
-}
// Dispatcher mechanism for calling an appropriate compliantWithV1_* given the
// desired target model type.
template <typename T_SlicedModel>
-void getNoncompliantOperations(const hal::V1_3::Model& model,
+void getNoncompliantOperations(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations);
template <>
-void getNoncompliantOperations<hal::V1_0::Model>(const hal::V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
+void getNoncompliantOperations<V1_0::Model>(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_0(model, noncompliantOperations);
}
template <>
-void getNoncompliantOperations<hal::V1_1::Model>(const hal::V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
+void getNoncompliantOperations<V1_1::Model>(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_1(model, noncompliantOperations);
}
template <>
-void getNoncompliantOperations<hal::V1_2::Model>(const hal::V1_3::Model& model,
- std::set<uint32_t>* noncompliantOperations) {
+void getNoncompliantOperations<V1_2::Model>(const V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_2(model, noncompliantOperations);
}
@@ -191,18 +186,25 @@ MetaModel::ReturnedSlice<T_SlicedModel> MetaModel::getSlice(Slice<T_SlicedModel>
return slice->mSlicedOperationIndexToOrigIndex.at(slicedOperationIndex);
})));
}
-template MetaModel::ReturnedSlice<hal::V1_0::Model> MetaModel::getSlice(
- Slice<hal::V1_0::Model>* slice) const;
-template MetaModel::ReturnedSlice<hal::V1_1::Model> MetaModel::getSlice(
- Slice<hal::V1_1::Model>* slice) const;
-template MetaModel::ReturnedSlice<hal::V1_2::Model> MetaModel::getSlice(
- Slice<hal::V1_2::Model>* slice) const;
-// When adding HAL version 1.4, make sure to handle control flow and referenced
-// subgraphs here properly. A V1_3 sliced model should contain an IF/WHILE and
-// its referenced subgraphs only if there are no V1_4+ operations in those
-// subgraphs.
-// template MetaModel::ReturnedSlice<hal::V1_3::Model> MetaModel::getSlice(
-// Slice<hal::V1_3::Model>* slice) const;
+template MetaModel::ReturnedSlice<V1_0::Model> MetaModel::getSlice(Slice<V1_0::Model>* slice) const;
+template MetaModel::ReturnedSlice<V1_1::Model> MetaModel::getSlice(Slice<V1_1::Model>* slice) const;
+template MetaModel::ReturnedSlice<V1_2::Model> MetaModel::getSlice(Slice<V1_2::Model>* slice) const;
+template <>
+MetaModel::ReturnedSlice<V1_3::Model> MetaModel::getSlice(Slice<V1_3::Model>* slice) const {
+ CHECK(slice != nullptr);
+ if (slice->mState == SliceState::UNINITIALIZED) {
+ // When adding HAL version 1.4, make sure to handle control flow and referenced
+ // subgraphs here properly. A V1_3 sliced model should contain an IF/WHILE and
+ // its referenced subgraphs only if there are no V1_4+ operations in those
+ // subgraphs.
+ *slice = {
+ .mState = SliceState::NORMAL,
+ .mHidlModel = convertToV1_3(mModel),
+ };
+ }
+ Mapper trivialMapper = [](uint32_t i) { return i; };
+ return std::make_pair(slice->mHidlModel, trivialMapper);
+}
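
The new V1_3 specialization converts the canonical model lazily and caches it in the slice. The core of that memoization pattern, reduced to a sketch with illustrative names:

    struct CachedModel {
        bool initialized = false;
        V1_3::Model model;
    };

    const V1_3::Model& getOrConvert(CachedModel* cache, const Model& canonical) {
        if (!cache->initialized) {  // convert exactly once, on first request
            cache->model = convertToV1_3(canonical);
            cache->initialized = true;
        }
        return cache->model;
    }
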
// Utility class for makeSlice().
//
@@ -234,8 +236,8 @@ template MetaModel::ReturnedSlice<hal::V1_2::Model> MetaModel::getSlice(
template <typename T_SlicedOperand>
class MetaModel::OrigOperandToSlicedInputOperandIndex {
public:
- OrigOperandToSlicedInputOperandIndex(hidl_vec<T_SlicedOperand>* slicedOperands,
- hidl_vec<uint32_t>* slicedInputIndexes)
+ OrigOperandToSlicedInputOperandIndex(hardware::hidl_vec<T_SlicedOperand>* slicedOperands,
+ hardware::hidl_vec<uint32_t>* slicedInputIndexes)
: mSlicedOperands(*slicedOperands), mSlicedInputIndexes(*slicedInputIndexes) {}
// Given an operand from the original model, return the index of the
@@ -246,21 +248,19 @@ class MetaModel::OrigOperandToSlicedInputOperandIndex {
auto it = mMap.find(operand);
if (it != mMap.end()) {
VLOG(COMPILATION) << "OrigOperandToSlicedInputOperandIndex::getIndex looked for "
- << toString(operand) << " and found " << it->second << ": "
- << toString(it->first);
+ << operand << " and found " << it->second << ": " << it->first;
return it->second;
}
// Create
- operand.numberOfConsumers = 0;
- operand.lifetime = convertTo<decltype(operand.lifetime)>(OperandLifeTime::SUBGRAPH_INPUT);
+ operand.lifetime = Operand::LifeTime::SUBGRAPH_INPUT;
operand.location = {};
uint32_t slicedOperandIndex =
extend(&mSlicedOperands, convertTo<T_SlicedOperand>(operand)).first;
mMap[operand] = slicedOperandIndex;
extend(&mSlicedInputIndexes, slicedOperandIndex);
VLOG(COMPILATION) << "OrigOperandToSlicedInputOperandIndex::getIndex created "
- << slicedOperandIndex << ": " << toString(operand);
+ << slicedOperandIndex << ": " << operand;
return slicedOperandIndex;
}
@@ -284,38 +284,36 @@ class MetaModel::OrigOperandToSlicedInputOperandIndex {
}
private:
- static bool compare(const SymmPerChannelQuantParams& a,
- const SymmPerChannelQuantParams& b) {
+ static bool compare(const Operand::SymmPerChannelQuantParams& a,
+ const Operand::SymmPerChannelQuantParams& b) {
if (a.scales != b.scales) {
return a.scales < b.scales;
}
return a.channelDim < b.channelDim;
}
- static bool compare(const OperandExtraParams& a, const OperandExtraParams& b) {
- if (a.getDiscriminator() != b.getDiscriminator()) {
- return a.getDiscriminator() < b.getDiscriminator();
+ static bool compare(const Operand::ExtraParams& a, const Operand::ExtraParams& b) {
+ if (a.index() != b.index()) {
+ return a.index() < b.index();
}
-
- switch (a.getDiscriminator()) {
- case OperandExtraParams::hidl_discriminator::channelQuant:
- return compare(a.channelQuant(), b.channelQuant());
-
- case OperandExtraParams::hidl_discriminator::extension:
- return a.extension() < b.extension();
-
- case OperandExtraParams::hidl_discriminator::none:
- return false;
-
- default:
- CHECK(false) << "Unexpected";
- return false;
+ if (std::holds_alternative<Operand::SymmPerChannelQuantParams>(a)) {
+ return compare(std::get<Operand::SymmPerChannelQuantParams>(a),
+ std::get<Operand::SymmPerChannelQuantParams>(b));
}
+ if (std::holds_alternative<Operand::ExtensionParams>(a)) {
+ return compare(std::get<Operand::ExtensionParams>(a),
+ std::get<Operand::ExtensionParams>(b));
+ }
+ if (std::holds_alternative<Operand::NoParams>(a)) {
+ return false;
+ }
+ CHECK(false) << "Unexpected";
+ return false;
}
};
std::map<Operand, uint32_t, Compare> mMap;
- hidl_vec<T_SlicedOperand>& mSlicedOperands;
- hidl_vec<uint32_t>& mSlicedInputIndexes;
+ hardware::hidl_vec<T_SlicedOperand>& mSlicedOperands;
+ hardware::hidl_vec<uint32_t>& mSlicedInputIndexes;
};
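
Operand::ExtraParams is now a std::variant, so the comparison above orders first by which alternative is held (index()) and only then by the held value. The same idea on a toy variant:

    #include <string>
    #include <variant>

    using Params = std::variant<std::monostate, int, std::string>;

    bool paramsLess(const Params& a, const Params& b) {
        if (a.index() != b.index()) return a.index() < b.index();
        if (std::holds_alternative<int>(a))
            return std::get<int>(a) < std::get<int>(b);
        if (std::holds_alternative<std::string>(a))
            return std::get<std::string>(a) < std::get<std::string>(b);
        return false;  // both std::monostate: equal
    }

std::variant's built-in operator< performs the same index-then-value ordering, but it requires operator< on every alternative, which SymmPerChannelQuantParams presumably lacks; hence the explicit compare() helpers.
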
template <class T_SlicedModel>
@@ -329,11 +327,14 @@ void MetaModel::processOperations(
using SlicedOperation = typename Slice<T_SlicedModel>::Operation;
using SlicedOperationType = typename Slice<T_SlicedModel>::OperationType;
- const auto& origOperands = mHidlModel.main.operands;
- const auto& origOperations = mHidlModel.main.operations;
+ const auto& origOperands = mModel.main.operands;
+ const auto& origOperations = mModel.main.operations;
auto& slicedOperands = slice->mHidlModel.operands;
auto& slicedOperations = slice->mHidlModel.operations;
+ std::vector<uint32_t> origOperandNumberOfConsumers =
+ countNumberOfConsumers(origOperands.size(), origOperations);
+
for (uint32_t origOperationIndex = 0; origOperationIndex < origOperations.size();
++origOperationIndex) {
const Operation& origOperation = origOperations[origOperationIndex];
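
With numberOfConsumers gone from the canonical Operand, consumer counts are recomputed on demand. A sketch of what countNumberOfConsumers() amounts to (signature approximated):

    std::vector<uint32_t> countConsumers(size_t numOperands,
                                         const std::vector<Operation>& operations) {
        std::vector<uint32_t> counts(numOperands, 0);
        for (const Operation& operation : operations) {
            for (uint32_t input : operation.inputs) {
                ++counts[input];  // each use as an input counts as one consumer
            }
        }
        return counts;
    }
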
@@ -401,9 +402,9 @@ void MetaModel::processOperations(
slicedOperation.outputs[outputNum] = slicedOperandIndex;
const auto subgraphOutputLifetime = convertTo<decltype(slicedOperand.lifetime)>(
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ V1_3::OperandLifeTime::SUBGRAPH_OUTPUT);
if (!inputOperandIndexesOfCompliantOperations.count(origOperandIndex) &&
- origOperand.numberOfConsumers) {
+ origOperandNumberOfConsumers[origOperandIndex] != 0) {
// Was consumed only by noncompliant operations; convert to
// an output of the sliced model.
slicedOperand.lifetime = subgraphOutputLifetime;
@@ -427,24 +428,24 @@ MetaModel::Slice<T_SlicedModel> MetaModel::makeSlice() const {
Slice<T_SlicedModel> slice;
- const auto& origOperands = mHidlModel.main.operands;
- const auto& origOperations = mHidlModel.main.operations;
+ const auto& origOperands = mModel.main.operands;
+ const auto& origOperations = mModel.main.operations;
auto& slicedOperands = slice.mHidlModel.operands;
// Indexes of elements of noncompliant origOperations
std::set<uint32_t> noncompliantOperations;
- getNoncompliantOperations<T_SlicedModel>(mHidlModel, &noncompliantOperations);
+ getNoncompliantOperations<T_SlicedModel>(convertToV1_3(mModel), &noncompliantOperations);
// Map from an operand index in origOperands to the corresponding operand index in
// slicedOperands
std::map<uint32_t, uint32_t> origOperandIndexToSlicedIndex;
// Collect the operand indexes of every operand that is an input to a
- // compliant operation. If the operand is a CONSTANT_* or a NO_VALUE, copy
- // it to the sliced model and update origOperandIndexToSlicedIndex
- // accordingly. Otherwise, we'll deal with the operand in the subsequent
- // "Main loop", where we process operation outputs (intermediates and model
- // outputs).
+ // compliant operation. If the operand is a CONSTANT_*, POINTER, or a
+ // NO_VALUE, copy it to the sliced model and update
+ // origOperandIndexToSlicedIndex accordingly. Otherwise, we'll deal with
+ // the operand in the subsequent "Main loop", where we process operation
+ // outputs (intermediates and model outputs).
std::set<uint32_t> inputOperandIndexesOfCompliantOperations;
for (uint32_t origOperationIndex = 0; origOperationIndex < origOperations.size();
++origOperationIndex) {
@@ -455,9 +456,10 @@ MetaModel::Slice<T_SlicedModel> MetaModel::makeSlice() const {
if (inputOperandIndexesOfCompliantOperations.insert(input).second) {
const Operand& origOperand = origOperands[input];
switch (origOperand.lifetime) {
- case OperandLifeTime::CONSTANT_COPY:
- case OperandLifeTime::CONSTANT_REFERENCE:
- case OperandLifeTime::NO_VALUE: {
+ case Operand::LifeTime::CONSTANT_COPY:
+ case Operand::LifeTime::CONSTANT_REFERENCE:
+ case Operand::LifeTime::POINTER:
+ case Operand::LifeTime::NO_VALUE: {
const uint32_t slicedOperandIndex =
extend(&slicedOperands, convertTo<SlicedOperand>(origOperand))
.first;
@@ -482,7 +484,7 @@ MetaModel::Slice<T_SlicedModel> MetaModel::makeSlice() const {
// only if it is consumed by at least one compliant operation. Note that in
// the sliced model we share all model inputs of the same "type"; and that
// we may later add model inputs to the sliced model.
- for (uint32_t origInputIndex : mHidlModel.main.inputIndexes) {
+ for (uint32_t origInputIndex : mModel.main.inputIndexes) {
if (inputOperandIndexesOfCompliantOperations.count(origInputIndex)) {
const uint32_t slicedIndex =
origOperandToSlicedInputOperandIndex.getIndex(origOperands[origInputIndex]);
@@ -502,19 +504,19 @@ MetaModel::Slice<T_SlicedModel> MetaModel::makeSlice() const {
// This would be more complex and probably take more computation time, but
// it would reduce the size of the sliced model, and hence the time spent
// copying it around and passing it across the HAL interface.
- slice.mHidlModel.operandValues = mHidlModel.operandValues;
- slice.mHidlModel.pools = mHidlModel.pools;
+ slice.mHidlModel.operandValues = convertToV1_0(mModel.operandValues);
+ slice.mHidlModel.pools = convertToV1_0(mModel.pools);
if (VLOG_IS_ON(COMPILATION)) {
{
std::ostringstream fromName;
- fromName << "Slice: From " << ModelVersion<decltype(mHidlModel)>::name;
- graphDump(fromName.str().c_str(), mHidlModel);
+ fromName << "Slice: From canonical";
+ graphDump(fromName.str().c_str(), mModel);
}
{
std::ostringstream toName;
toName << "Slice: To " << ModelVersion<decltype(slice.mHidlModel)>::name;
- graphDump(toName.str().c_str(), convertToV1_3(slice.mHidlModel));
+ graphDump(toName.str().c_str(), uncheckedConvert(convertToV1_3(slice.mHidlModel)));
}
}
diff --git a/nn/common/OperationResolver.cpp b/nn/common/OperationResolver.cpp
index fce3af4cc..e6792b2a6 100644
--- a/nn/common/OperationResolver.cpp
+++ b/nn/common/OperationResolver.cpp
@@ -23,8 +23,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
// TODO(b/119608412): Find a way to not reference every operation here.
const OperationRegistration* register_ABS();
const OperationRegistration* register_ADD();
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index f0bcb0ed7..9f2af2269 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -32,8 +32,6 @@ namespace nn {
namespace {
-using namespace hal;
-
bool validateOperandTypes(const std::vector<OperandType>& expectedTypes, const char* tag,
uint32_t operandCount,
std::function<OperandType(uint32_t)> getOperandType) {
@@ -41,8 +39,8 @@ bool validateOperandTypes(const std::vector<OperandType>& expectedTypes, const c
for (uint32_t i = 0; i < operandCount; ++i) {
OperandType type = getOperandType(i);
NN_RET_CHECK(type == expectedTypes[i])
- << "Invalid " << tag << " tensor type " << toString(type) << " for " << tag << " "
- << i << ", expected " << toString(expectedTypes[i]);
+ << "Invalid " << tag << " tensor type " << type << " for " << tag << " " << i
+ << ", expected " << expectedTypes[i];
}
return true;
}
@@ -97,17 +95,17 @@ bool validateHalVersion(const IOperationValidationContext* context,
if (i != 0) {
message << ", ";
}
- message << toString(context->getInputType(i));
+ message << context->getInputType(i);
}
message << "} and outputs {";
for (uint32_t i = 0, n = context->getNumOutputs(); i < n; ++i) {
if (i != 0) {
message << ", ";
}
- message << toString(context->getOutputType(i));
+ message << context->getOutputType(i);
}
- message << "} is only supported since " << toString(minSupportedHalVersion)
- << " (validating using " << toString(context->getHalVersion()) << ")";
+ message << "} is only supported since " << minSupportedHalVersion << " (validating using "
+ << context->getHalVersion() << ")";
NN_RET_CHECK_FAIL() << message.str();
}
return true;
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 1c41e5940..398da5508 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -22,6 +22,10 @@
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <errno.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.3/Conversions.h>
#include <poll.h>
#include <algorithm>
@@ -42,13 +46,12 @@
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"
#include "ValidateHal.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-using namespace hal;
-
-constexpr PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
@@ -98,21 +101,26 @@ void initVLogMask() {
}
}
-Deadline makeDeadline(uint64_t duration) {
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) {
+ // According to the standard, std::chrono::nanoseconds::rep is a signed
+ // integer type of at least 64 bits. This check prevents an overflow when
+ // rep is exactly 64 bits.
+ if constexpr (sizeof(std::chrono::nanoseconds::rep) == sizeof(int64_t)) {
+ nanoseconds = std::min(nanoseconds,
+ static_cast<uint64_t>(std::chrono::nanoseconds::max().count()));
+ }
+ return std::chrono::nanoseconds{nanoseconds};
+}
+
+Deadline makeDeadline(TimeoutDuration duration) {
const auto maxTime = Deadline::max();
const auto currentTime = std::chrono::steady_clock::now();
- // Create Deadline. If there would be an overflow, use the max value.
- const uint64_t remainingNanoseconds =
- std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime - currentTime).count();
- if (duration > remainingNanoseconds) {
+ // If there would be an overflow, use the max value.
+ if (duration > maxTime - currentTime) {
return maxTime;
}
- return currentTime + std::chrono::nanoseconds{duration};
-}
-
-std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
- return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+ return currentTime + duration;
}
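
Both helpers saturate rather than overflow: makeTimeoutDuration() clamps a uint64_t nanosecond count to std::chrono::nanoseconds::max(), and makeDeadline() returns Deadline::max() when adding the duration to the current time would overflow. Usage sketch:

    const TimeoutDuration d = makeTimeoutDuration(std::numeric_limits<uint64_t>::max());
    // d == std::chrono::nanoseconds::max(), not a negative wrapped value.
    const Deadline deadline = makeDeadline(d);  // saturates to Deadline::max()
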
static uint64_t getMaxNanosecondsSinceEpoch() {
@@ -121,8 +129,8 @@ static uint64_t getMaxNanosecondsSinceEpoch() {
return maxTime.time_since_epoch().count();
}
-std::optional<Deadline> makeDeadline(const OptionalTimePoint& timePoint) {
- using Discriminator = hal::OptionalTimePoint::hidl_discriminator;
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint) {
+ using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator;
if (timePoint.getDiscriminator() == Discriminator::none) {
return std::nullopt;
}
@@ -146,12 +154,7 @@ bool hasDeadlinePassed(const std::optional<Deadline>& deadline) {
}
static OptionalTimePoint makeTimePoint(const Deadline& deadline) {
- const auto timeSinceEpoch = deadline.time_since_epoch();
- const uint64_t nanosecondsSinceEpoch =
- std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
- OptionalTimePoint ret;
- ret.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
- return ret;
+ return deadline;
}
OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) {
@@ -159,18 +162,18 @@ OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) {
}
static bool isExtensionOperandType(int32_t type) {
- return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX);
+ return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}
static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
- return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX);
+ return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}
-bool isExtensionOperandType(OperandType type) {
+bool isExtensionOperandType(V1_3::OperandType type) {
return isExtensionOperandType(static_cast<int32_t>(type));
}
-bool isExtensionOperationType(OperationType type) {
+bool isExtensionOperationType(V1_3::OperationType type) {
return isExtensionOperationType(static_cast<int32_t>(type));
}
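
The rewritten checks treat any value with bits set above the low kExtensionTypeBits as an extension type, replacing the old comparison against *TypeRange::BASE_MAX. Schematically, assuming a 16-bit split between the extension prefix and the type within the extension:

    constexpr uint32_t kTypeBits = 16;  // illustrative stand-in for kExtensionTypeBits
    bool isExtensionType(uint32_t type) {
        return (type >> kTypeBits) != 0;  // nonzero prefix bits => extension type
    }
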
@@ -211,7 +214,7 @@ class OperationValidationContext : public IOperationValidationContext {
uint32_t getNumInputs() const override;
OperandType getInputType(uint32_t index) const override;
Shape getInputShape(uint32_t index) const override;
- const OperandExtraParams getInputExtraParams(uint32_t index) const override;
+ const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;
uint32_t getNumOutputs() const override;
OperandType getOutputType(uint32_t index) const override;
@@ -266,7 +269,7 @@ Shape OperationValidationContext::getInputShape(uint32_t index) const {
operand->extraParams};
}
-const OperandExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
+const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
return getInputOperand(index)->extraParams;
}
@@ -284,15 +287,11 @@ Shape OperationValidationContext::getOutputShape(uint32_t index) const {
#define COUNT(X) (sizeof(X) / sizeof(X[0]))
-std::string getOperandTypeName(OperandType type) {
+std::string getOperandTypeName(V1_3::OperandType type) {
return toString(type);
}
-static std::string getOperationName(uint32_t code) {
- return getOperationName(static_cast<OperationType>(code));
-}
-
-std::string getOperationName(OperationType type) {
+std::string getOperationName(V1_3::OperationType type) {
return toString(type);
}
@@ -360,12 +359,14 @@ bool nonExtensionOperandTypeIsScalar(int type) {
}
uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
- CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
- int n = static_cast<int>(type);
- uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
- return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
- ? sizeOfElement
- : sizeOfTensorData(sizeOfElement, dimensions);
+ const size_t size = getNonExtensionSize(type, dimensions).value();
+ CHECK_LE(size, std::numeric_limits<uint32_t>::max());
+ return size;
+}
+
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions);
}
// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t.
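
nonExtensionOperandSizeOfData() now defers to getNonExtensionSize() and asserts that the result fits in uint32_t. Conceptually the size is the element size for scalars and the element size multiplied by every dimension for tensors; a rough sketch that ignores the overflow handling done by the helper below:

    uint32_t tensorBytes(uint32_t sizeOfElement, const std::vector<uint32_t>& dims) {
        uint32_t size = sizeOfElement;
        for (uint32_t d : dims) size *= d;  // the real code tracks uint32_t overflow
        return size;
    }
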
@@ -389,9 +390,9 @@ uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& d
return size;
}
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
const std::vector<uint32_t>& dimensions) {
- CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
+ CHECK(!isExtension(type)) << "Size of extension operand data is unknown";
int n = static_cast<int>(type);
uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
@@ -399,6 +400,11 @@ bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
: sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
}
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions);
+}
+
bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
const std::vector<uint32_t>& dimensions) {
return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
@@ -417,11 +423,21 @@ bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t
dimensions.size());
}
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions) {
+ return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
+ dimensions.size());
+}
+
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}
bool tensorHasUnspecifiedDimensions(const Operand& operand) {
+ return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions);
+}
+
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) {
return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
operand.dimensions.size());
}
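
For tensor types, "unspecified dimensions" means an unknown rank (an empty dimension list) or any dimension equal to 0. A standalone restatement of the predicate (helper name is illustrative):

    bool hasUnspecifiedDims(const std::vector<uint32_t>& dims) {
        return dims.empty() ||
               std::any_of(dims.begin(), dims.end(),
                           [](uint32_t d) { return d == 0; });  // from <algorithm>
    }
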
@@ -490,10 +506,15 @@ void logModelToInfo(const V1_3::Model& model) {
LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
}
+void logModelToInfo(const Model& model) {
+ LOG(INFO) << "Model start";
+ logModelToInfo(convertToV1_3(model));
+}
+
bool validateOperandSymmPerChannelQuantParams(
- const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
- const char* tag) {
- if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+ const V1_3::Operand& halOperand,
+ const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) {
+ if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
return false;
}
@@ -663,17 +684,15 @@ int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t
}
for (uint32_t i = 0; i < inOperandCount; i++) {
if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
- LOG(ERROR) << "Invalid input tensor type "
- << toString(operands[inOperandIndexes[i]].type) << " for input " << i
- << ", expected " << toString(inExpectedTypes[i]);
+ LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type
+ << " for input " << i << ", expected " << inExpectedTypes[i];
return ANEURALNETWORKS_BAD_DATA;
}
}
for (uint32_t i = 0; i < outOperandCount; i++) {
if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
- LOG(ERROR) << "Invalid output tensor type "
- << toString(operands[outOperandIndexes[i]].type) << " for input " << i
- << ", expected " << toString(outExpectedInTypes[i]);
+ LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type
+ << " for input " << i << ", expected " << outExpectedInTypes[i];
return ANEURALNETWORKS_BAD_DATA;
}
}
@@ -684,9 +703,9 @@ int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t
static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
HalVersion minSupportedHalVersion) {
if (halVersion < minSupportedHalVersion) {
- LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
- << " are only supported in " << toString(minSupportedHalVersion)
- << " and later (validating using " << toString(halVersion) << ")";
+ LOG(ERROR) << "The given inputs and outputs for operation " << opType
+ << " are only supported in " << minSupportedHalVersion
+ << " and later (validating using " << halVersion << ")";
return ANEURALNETWORKS_BAD_DATA;
}
return ANEURALNETWORKS_NO_ERROR;
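
validateHalVersion() gates each operand-type combination on the minimum HAL version that introduced it, and callers propagate the failure with NN_RETURN_IF_ERROR. Typical call shape, as seen throughout validateOperation():

    // Illustrative: signed asymmetric quantized tensors require HAL 1.3.
    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
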
@@ -695,7 +714,7 @@ static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion ha
// Checks if two operands have the same types, ranks (if specified), dimensions
// (if specified), scales, zeroPoints, and extraParams.
static bool compatible(const Operand& a, const Operand& b) {
- NN_RET_CHECK(a.type == b.type) << toString(a.type) << " != " << toString(b.type);
+ NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type;
if (a.dimensions.size() != 0 && b.dimensions.size() != 0) {
NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions";
for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) {
@@ -706,14 +725,13 @@ static bool compatible(const Operand& a, const Operand& b) {
}
NN_RET_CHECK_EQ(a.scale, b.scale);
NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
- NN_RET_CHECK(a.extraParams == b.extraParams)
- << toString(a.extraParams) << " != " << toString(b.extraParams);
+ NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams;
return true;
}
static bool validateConditionOperand(const Operand& operand) {
NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
- << "Unexpected condition operand type: " << toString(operand.type);
+ << "Unexpected condition operand type: " << operand.type;
NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
return true;
@@ -764,8 +782,7 @@ static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uin
static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper,
const Operand& operand) {
- if (!helper.allowControlFlowOperationWithOperandOfUnknownSize &&
- !isExtensionOperandType(operand.type)) {
+ if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) {
NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u);
}
return true;
@@ -847,8 +864,7 @@ static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
const uint32_t* inputIndexes, uint32_t outputCount,
const uint32_t* outputIndexes,
- const std::vector<hal::Operand>& operands,
- HalVersion halVersion) {
+ const std::vector<Operand>& operands, HalVersion halVersion) {
if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
LOG(ERROR) << "This validateOperation() overload does not support control flow";
@@ -873,7 +889,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if (halVersion < HalVersion::V1_2) {
LOG(ERROR)
<< "Extension operations are supported since HAL version 1.2, validating using "
- << toString(halVersion);
+ << halVersion;
return ANEURALNETWORKS_BAD_DATA;
}
// There is no other validation we can do for an extension operation.
@@ -883,7 +899,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
<< ") or output operands (" << outputCount << ", expected " << expOut
- << ") for operation " << getOperationName(opType);
+ << ") for operation " << opType;
};
switch (opType) {
@@ -916,14 +932,12 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
OperandType::TENSOR_INT32};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -934,7 +948,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 3 or 2) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -957,8 +971,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 3) {
@@ -975,7 +988,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 3 or 2) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -998,8 +1011,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 3) {
@@ -1023,8 +1035,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM &&
inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
@@ -1051,8 +1062,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if (inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
@@ -1074,8 +1084,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto hashType = operands[inputIndexes[0]].type;
@@ -1097,8 +1106,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
OperandType::INT32,
};
} else {
- LOG(ERROR) << "Unsupported hash tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported hash tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
@@ -1117,7 +1125,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
outputCount != kNumOutputsMergedWithState)) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 61) or output operands (" << outputCount
- << ", expected 1, 2, 5 or 6) for operation " << getOperationName(opType);
+ << ", expected 1, 2, 5 or 6) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1125,8 +1133,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
auto inputType = operands[inputIndexes[0]].type;
if (inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_FLOAT16) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1162,7 +1169,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if ((inputCount != 23 && inputCount != 27) || outputCount != 4) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 23 or 27) or output operands (" << outputCount
- << ", expected 4) for operation " << getOperationName(opType);
+ << ", expected 4) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes;
@@ -1170,8 +1177,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
auto inputType = operands[inputIndexes[0]].type;
if (inputType != OperandType::TENSOR_FLOAT32 &&
inputType != OperandType::TENSOR_FLOAT16) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1239,8 +1245,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
OperandType::TENSOR_INT32,
};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
@@ -1279,8 +1284,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
OperandType::TENSOR_FLOAT16,
};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1299,8 +1303,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
} else if (inputType == OperandType::TENSOR_FLOAT16) {
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {
@@ -1316,7 +1319,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 3 or 2) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1349,8 +1352,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 3) {
@@ -1367,7 +1369,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 4 or 3) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1407,8 +1409,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
};
outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputCount == 4) {
@@ -1462,14 +1463,12 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1514,14 +1513,12 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
}; // TODO(b/116699425): Make it UINT8.
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
@@ -1559,7 +1556,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
outExpectedTypes = {inputType}; // Only identity CAST is supported.
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
} else {
- LOG(ERROR) << "Unsupported data type for operation " << getOperationName(opType);
+ LOG(ERROR) << "Unsupported data type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
// Validate that output shape is equal to input shape if dimensions
@@ -1586,8 +1583,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
}
const auto inputRank = operands[inputIndexes[0]].dimensions.size();
if (inputRank > 4) {
- LOG(ERROR) << "Unsupported input tensor rank for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1600,8 +1596,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
} else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
@@ -1628,8 +1623,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {inputType, OperandType::INT32};
outExpectedTypes = {OperandType::TENSOR_INT32};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
@@ -1653,8 +1647,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {inputType, OperandType::INT32};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1669,7 +1662,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
case ANEURALNETWORKS_SPLIT: {
if (inputCount != 3) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)"
- << getOperationName(opType);
+ << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1678,8 +1671,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inputType != OperandType::TENSOR_INT32 &&
inputType != OperandType::TENSOR_QUANT8_ASYMM &&
inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1711,8 +1703,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {inputType, inputType};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1728,7 +1719,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
LOG(ERROR) << "Invalid number of input operands (" << inputCount
<< ", expected 12 or 9) or output operands (" << outputCount
- << ", expected 1) for operation " << getOperationName(opType);
+ << ", expected 1) for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
auto inputType = operands[inputIndexes[0]].type;
@@ -1751,15 +1742,16 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
if (filterType != inputType &&
filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- LOG(ERROR) << "Unsupported filter tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported filter tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
- operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) {
+ std::get<Operand::SymmPerChannelQuantParams>(
+ operands[inputIndexes[1]].extraParams)
+ .channelDim != 0) {
LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
- << getOperationName(opType);
+ << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1769,8 +1761,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
OperandType::INT32, OperandType::INT32};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
@@ -1805,8 +1796,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1831,8 +1821,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
inExpectedTypes = {inputType, inputType};
outExpectedTypes = {inputType};
} else {
- LOG(ERROR) << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
@@ -1864,7 +1853,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
static_cast<OperationType>(opType));
if (operationRegistration == nullptr) {
if (0 <= opType && opType < kNumberOfOperationTypes) {
- LOG(ERROR) << getOperationName(opType) << " not registered";
+ LOG(ERROR) << opType << " not registered";
} else {
LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
<< kNumberOfOperationTypes << ")";
@@ -1872,14 +1861,14 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
if (operationRegistration->validate == nullptr) {
- LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
+ LOG(ERROR) << "Incomplete operation registration: " << opType;
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
OperationValidationContext context(operationRegistration->name, inputCount,
inputIndexes, outputCount, outputIndexes,
operands.data(), halVersion);
if (!operationRegistration->validate(&context)) {
- LOG(ERROR) << "Validation failed for operation " << getOperationName(opType);
+ LOG(ERROR) << "Validation failed for operation " << opType;
return ANEURALNETWORKS_BAD_DATA;
}
return ANEURALNETWORKS_NO_ERROR;
@@ -1943,12 +1932,28 @@ int convertErrorStatusToResultCode(ErrorStatus status) {
return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
+ case ErrorStatus::DEAD_OBJECT:
+ return ANEURALNETWORKS_DEAD_OBJECT;
}
- LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
- << " mapped to ANEURALNETWORKS_OP_FAILED";
+ LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
return ANEURALNETWORKS_OP_FAILED;
}
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
+ return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
+}
+
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
+ return convertErrorStatusToResultCode(uncheckedConvert(status));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
+}
+
std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
@@ -1966,42 +1971,22 @@ std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
return {n, std::move(outputShapes), timing};
}
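
Each HAL-typed entry point added in this change is a thin shim: it converts its arguments to canonical types with uncheckedConvert() and defers to the canonical overload, so the logic lives in one place. The shape of the pattern, with a hypothetical function name:

    int doWork(V1_3::ErrorStatus status) {
        return doWork(uncheckedConvert(status));  // canonical overload does the work
    }
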
-std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
- const std::vector<uint32_t>& rhs) {
- if (rhs.empty()) return lhs;
- if (lhs.empty()) return rhs;
- if (lhs.size() != rhs.size()) {
- LOG(ERROR) << "Incompatible ranks: " << toString(lhs) << " and " << toString(rhs);
- return std::nullopt;
- }
- std::vector<uint32_t> combined = lhs;
- for (uint32_t i = 0; i < lhs.size(); i++) {
- if (lhs[i] == 0) {
- combined[i] = rhs[i];
- } else if (rhs[i] != 0 && lhs[i] != rhs[i]) {
- LOG(ERROR) << "Incompatible dimensions: " << toString(lhs) << " and " << toString(rhs);
- return std::nullopt;
- }
- }
- return combined;
-}
-
// Capabilities::operandPerformance utilities.
// The field Capabilities::operandPerformance is a vector sorted by the field
// Capabilities::OperandPerformance::type.
template <HalVersion version>
-hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
- PerformanceInfo perf) {
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ V1_0::PerformanceInfo perf) {
using OpPerf = VersionedOperandPerformance<version>;
// Note: range presents enumerators in declaration order, not in numerical order.
- static constexpr hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
+ static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
std::vector<OpPerf> ret;
ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
for (VersionedOperandType<version> type : kOperandTypeRange) {
- if (static_cast<OperandType>(type) != OperandType::SUBGRAPH) {
+ if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
ret.push_back(OpPerf{type, perf});
}
}
@@ -2011,14 +1996,14 @@ hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
return ret;
}
-template hal::hidl_vec<V1_2::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_2>(PerformanceInfo perf);
-template hal::hidl_vec<V1_3::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_3>(PerformanceInfo perf);
+template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
+template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
template <HalVersion version>
-void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
- VersionedOperandType<version> type, hal::PerformanceInfo perf) {
+void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
+ VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
CHECK(operandPerformance != nullptr);
const auto it =
std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
@@ -2029,23 +2014,24 @@ void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerforma
it->info = perf;
}
-void update(hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
- V1_2::OperandType type, PerformanceInfo perf) {
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, V1_0::PerformanceInfo perf) {
update<HalVersion::V1_2>(operandPerformance, type, perf);
}
-void update(hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
- V1_3::OperandType type, PerformanceInfo perf) {
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, V1_0::PerformanceInfo perf) {
update<HalVersion::V1_3>(operandPerformance, type, perf);
}
template <HalVersion version>
-PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
- VersionedOperandType<version> type) {
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
+ VersionedOperandType<version> type) {
const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
[](const VersionedOperandPerformance<version>& perf,
VersionedOperandType<version> type) {
- return static_cast<OperandType>(perf.type) <
- static_cast<OperandType>(type);
+ return static_cast<V1_3::OperandType>(perf.type) <
+ static_cast<V1_3::OperandType>(type);
});
if (it == operandPerformance.end()) {
LOG(WARNING) << "No PerformanceInfo for " << toString(type);
@@ -2055,12 +2041,14 @@ PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& ope
}
}
-PerformanceInfo lookup(const hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
- V1_2::OperandType type) {
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type) {
return lookup<HalVersion::V1_2>(operandPerformance, type);
}
-PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
- V1_3::OperandType type) {
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type) {
CHECK(type != V1_3::OperandType::SUBGRAPH)
<< "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
return lookup<HalVersion::V1_3>(operandPerformance, type);
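For reference: update() and lookup() above both depend on operandPerformance staying sorted by type, which is what makes std::lower_bound valid. A minimal standalone sketch of the idiom, using illustrative stand-in types rather than the HAL ones (the sketch also checks it->type != type, treating an absent type as missing even when a larger type exists):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Perf { float execTime; float powerUsage; };
    struct Entry { int32_t type; Perf info; };  // kept sorted by 'type'

    Perf lookupPerf(const std::vector<Entry>& table, int32_t type) {
        const auto it = std::lower_bound(
                table.begin(), table.end(), type,
                [](const Entry& e, int32_t t) { return e.type < t; });
        if (it == table.end() || it->type != type) {
            return {1.0f, 1.0f};  // pessimistic default, like the WARNING path above
        }
        return it->info;
    }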
@@ -2070,16 +2058,16 @@ PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& o
// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
// This array must be in sorted order.
-static const OperandType kQuantized8PerformanceConsistentWithP[] = {
- OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM,
- OperandType::TENSOR_OEM_BYTE};
+static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
+ V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
+ V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
- const PerformanceInfo quantized8Performance =
+ const V1_0::PerformanceInfo quantized8Performance =
lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
std::end(kQuantized8PerformanceConsistentWithP),
- [quantized8Performance, &capabilities](OperandType type) {
+ [quantized8Performance, &capabilities](V1_3::OperandType type) {
return quantized8Performance ==
lookup(capabilities.operandPerformance,
static_cast<V1_2::OperandType>(type));
@@ -2087,26 +2075,26 @@ static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& cap
}
static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
- const PerformanceInfo quantized8Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
+ const V1_0::PerformanceInfo quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
std::end(kQuantized8PerformanceConsistentWithP),
- [quantized8Performance, &capabilities](OperandType type) {
+ [quantized8Performance, &capabilities](V1_3::OperandType type) {
return quantized8Performance ==
lookup(capabilities.operandPerformance, type);
});
}
-static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
- PerformanceInfo quantized8Performance) {
- hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
+static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
+ hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
std::size(kQuantized8PerformanceConsistentWithP));
- std::transform(
- std::begin(kQuantized8PerformanceConsistentWithP),
- std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
- [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
- return {static_cast<V1_2::OperandType>(type), quantized8Performance};
- });
+ std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
+ std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
+ [quantized8Performance](
+ V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
+ return {static_cast<V1_2::OperandType>(type), quantized8Performance};
+ });
return ret;
}
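The two helpers above encode one rule from the comment: a capabilities table is consistent with Android P when every type in the fixed, sorted kQuantized8PerformanceConsistentWithP list reports exactly the TENSOR_QUANT8_ASYMM performance. A self-contained sketch of that predicate (types and values are illustrative, not the HAL ones):

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <functional>

    struct Perf { float execTime, powerUsage; };
    inline bool operator==(Perf a, Perf b) {
        return a.execTime == b.execTime && a.powerUsage == b.powerUsage;
    }

    // Stand-in for kQuantized8PerformanceConsistentWithP (values illustrative).
    constexpr std::array<int32_t, 3> kLegacyTypes = {3, 4, 10};

    bool consistentWithP(Perf quant8,
                         const std::function<Perf(int32_t)>& lookup) {
        return std::all_of(kLegacyTypes.begin(), kLegacyTypes.end(),
                           [&](int32_t t) { return lookup(t) == quant8; });
    }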
@@ -2119,9 +2107,9 @@ bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
}
bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
- const PerformanceInfo perfTensorFloat32 =
+ const V1_0::PerformanceInfo perfTensorFloat32 =
lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
- const PerformanceInfo perfFloat32 =
+ const V1_0::PerformanceInfo perfFloat32 =
lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
if (perfTensorFloat32 != perfFloat32 ||
perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
@@ -2133,10 +2121,10 @@ bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
}
bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
- const PerformanceInfo perfTensorFloat32 =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
- const PerformanceInfo perfFloat32 =
- lookup(capabilities.operandPerformance, OperandType::FLOAT32);
+ const V1_0::PerformanceInfo perfTensorFloat32 =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
+ const V1_0::PerformanceInfo perfFloat32 =
+ lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
if (perfTensorFloat32 != perfFloat32 ||
perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
@@ -2168,8 +2156,8 @@ bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
- (lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
- lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
+ (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
+ lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
return false;
}
@@ -2323,9 +2311,9 @@ V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
<< " from V1_3::Capabilities to V1_0::Capabilities";
}
return {.float32Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
- .quantized8Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance = lookup(capabilities.operandPerformance,
+ V1_3::OperandType::TENSOR_QUANT8_ASYMM)};
}
V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
@@ -2357,9 +2345,9 @@ V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
<< " from V1_3::Capabilities to V1_1::Capabilities";
}
return {.float32Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
.quantized8Performance =
- lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
+ lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM),
.relaxedFloat32toFloat16Performance =
capabilities.relaxedFloat32toFloat16PerformanceTensor};
}
@@ -2415,7 +2403,7 @@ V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
capabilities.relaxedFloat32toFloat16PerformanceTensor,
};
const auto& inputOpPerf = capabilities.operandPerformance;
- hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
+ hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
opPerfSupported.resize(inputOpPerf.size());
auto last =
std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
@@ -2477,17 +2465,18 @@ static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
.outputs = operation.outputs};
}
-static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hidl_vec<V1_1::Operation>& operations) {
- hidl_vec<V1_0::Operation> result(operations.size());
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
return result;
}
-static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
- hidl_vec<V1_1::Operation> result(operations.size());
+static hardware::hidl_vec<V1_1::Operation> convertToV1_1(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_0::Operation& operation) { return convertToV1_1(operation); });
return result;
@@ -2513,13 +2502,15 @@ static bool compliantWith(HalVersion version, const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations) {
// A boolean vector indicating whether each pool is compliant with the target HAL version.
std::vector<bool> isPoolCompliant(model.pools.size(), false);
- std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
- [version](const hidl_memory& pool) { return validatePool(pool, version); });
+ std::transform(
+ model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
+ [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); });
// A boolean vector indicating whether each operand is compliant with the target HAL version.
std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
std::transform(model.main.operands.begin(), model.main.operands.end(),
- isOperandCompliant.begin(), [&isPoolCompliant, version](const Operand& op) {
+ isOperandCompliant.begin(),
+ [&isPoolCompliant, version](const V1_3::Operand& op) {
bool is_operand_compliant = false;
switch (version) {
case HalVersion::UNKNOWN:
@@ -2541,22 +2532,24 @@ static bool compliantWith(HalVersion version, const V1_3::Model& model,
break;
}
return is_operand_compliant &&
- !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
+ !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE &&
!isPoolCompliant[op.location.poolIndex]);
});
- auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) {
+ auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec<uint32_t>& indices) {
return std::all_of(
indices.begin(), indices.end(),
[&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
};
- auto localValidateOperation = [&model, version, &allOperandsCompliant](const Operation& op) {
+ auto localValidateOperation = [&model, version,
+ &allOperandsCompliant](const V1_3::Operation& op) {
if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
- int error = validateOperation(
- static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.main.operands, version);
+ int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+ uncheckedConvert(model.main.operands), version);
return error == ANEURALNETWORKS_NO_ERROR;
};
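compliantWith() above keeps a three-stage shape after the type migration: mark each pool, then mark each operand (a CONSTANT_REFERENCE operand fails when its pool failed), then validate an operation only once all of its operands passed. A compact sketch of that final gate (names illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // True iff every operand index an operation touches was marked compliant.
    bool allCompliant(const std::vector<uint32_t>& indices,
                      const std::vector<bool>& operandOk) {
        return std::all_of(indices.begin(), indices.end(),
                           [&](uint32_t i) { return operandOk[i]; });
    }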
@@ -2586,15 +2579,17 @@ bool compliantWithV1_0(const V1_1::Model& model) {
// V1_0::Model because all 1.0 drivers require strict calculation by default
// in the P NN runtime. Even if fp16 calculations are allowed, they can
// still be computed by a strict fp32 driver.
- return std::all_of(
- model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
- int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr,
- op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr,
- convertToV1_3(model.operands), HalVersion::V1_0);
- return error == ANEURALNETWORKS_NO_ERROR;
- });
+ auto operands = uncheckedConvert(convertToV1_3(model.operands));
+ return std::all_of(model.operations.begin(), model.operations.end(),
+ [&operands](const V1_1::Operation& op) {
+ int error = validateOperation(
+ static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands,
+ HalVersion::V1_0);
+ return error == ANEURALNETWORKS_NO_ERROR;
+ });
}
bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
@@ -2697,81 +2692,86 @@ static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
.outputs = operation.outputs};
}
-static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hidl_vec<V1_3::Operation>& operations) {
- hidl_vec<V1_0::Operation> result(operations.size());
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
return result;
}
-static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
- const hidl_vec<V1_2::Operation>& operations) {
- hidl_vec<V1_0::Operation> result(operations.size());
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_0::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
return result;
}
-static hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
- const hidl_vec<V1_3::Operation>& operations) {
- hidl_vec<V1_2::Operation> result(operations.size());
+static hardware::hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
return result;
}
-static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
- const hidl_vec<V1_2::Operation>& operations) {
- hidl_vec<V1_1::Operation> result(operations.size());
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
return result;
}
-static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
- const hidl_vec<V1_3::Operation>& operations) {
- hidl_vec<V1_1::Operation> result(operations.size());
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hardware::hidl_vec<V1_3::Operation>& operations) {
+ hardware::hidl_vec<V1_1::Operation> result(operations.size());
std::transform(
operations.begin(), operations.end(), result.begin(),
[](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
return result;
}
-static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
- hidl_vec<V1_2::Operation> result(operations.size());
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_0::Operation& operation) { return convertToV1_2(operation); });
return result;
}
-static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) {
- hidl_vec<V1_2::Operation> result(operations.size());
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_2::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_1::Operation& operation) { return convertToV1_2(operation); });
return result;
}
-static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_0::Operation>& operations) {
- hidl_vec<V1_3::Operation> result(operations.size());
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_0::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_0::Operation& operation) { return convertToV1_3(operation); });
return result;
}
-static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_1::Operation>& operations) {
- hidl_vec<V1_3::Operation> result(operations.size());
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_1::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_1::Operation& operation) { return convertToV1_3(operation); });
return result;
}
-static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_2::Operation>& operations) {
- hidl_vec<V1_3::Operation> result(operations.size());
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+ const hardware::hidl_vec<V1_2::Operation>& operations) {
+ hardware::hidl_vec<V1_3::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
[](const V1_2::Operation& operation) { return convertToV1_3(operation); });
return result;
@@ -2817,19 +2817,19 @@ V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
return static_cast<V1_0::OperandType>(operandType);
}
-bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime) {
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) {
return true;
}
-bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime) {
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) {
return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
}
-bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime) {
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) {
return true;
}
-bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime) {
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) {
return true;
}
@@ -2919,57 +2919,57 @@ V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
return operand;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_0::Operand>& operands) {
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands) {
return operands;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
- hidl_vec<V1_0::Operand> result(operands.size());
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ hardware::hidl_vec<V1_0::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_2::Operand& operand) { return convertToV1_0(operand); });
return result;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_3::Operand>& operands) {
- hidl_vec<V1_0::Operand> result(operands.size());
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ hardware::hidl_vec<V1_0::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_3::Operand& operand) { return convertToV1_0(operand); });
return result;
}
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
- hidl_vec<V1_2::Operand> result(operands.size());
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ hardware::hidl_vec<V1_2::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_0::Operand& operand) { return convertToV1_2(operand); });
return result;
}
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) {
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands) {
return operands;
}
-hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_3::Operand>& operands) {
- hidl_vec<V1_2::Operand> result(operands.size());
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ hardware::hidl_vec<V1_2::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_3::Operand& operand) { return convertToV1_2(operand); });
return result;
}
-hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_0::Operand>& operands) {
- hidl_vec<V1_3::Operand> result(operands.size());
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands) {
+ hardware::hidl_vec<V1_3::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_0::Operand& operand) { return convertToV1_3(operand); });
return result;
}
-hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_2::Operand>& operands) {
- hidl_vec<V1_3::Operand> result(operands.size());
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands) {
+ hardware::hidl_vec<V1_3::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
[](const V1_2::Operand& operand) { return convertToV1_3(operand); });
return result;
}
-hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_3::Operand>& operands) {
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands) {
return operands;
}
@@ -3158,16 +3158,16 @@ bool compliantWithV1_2(const V1_3::Request& request) {
});
}
-static hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
+static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
switch (pool.getDiscriminator()) {
case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
return pool.hidlMemory();
case V1_3::Request::MemoryPool::hidl_discriminator::token:
- return hidl_memory{};
+ return hardware::hidl_memory{};
}
}
-static V1_3::Request::MemoryPool convertToV1_3(const hidl_memory& pool) {
+static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) {
V1_3::Request::MemoryPool ret;
ret.hidlMemory(pool);
return ret;
@@ -3178,7 +3178,7 @@ V1_0::Request convertToV1_0(const V1_0::Request& request) {
}
static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
- hidl_vec<hidl_memory> pools(request.pools.size());
+ hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size());
std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
[](const auto& pool) { return convertToV1_0(pool); });
return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
@@ -3201,7 +3201,7 @@ V1_0::Request convertToV1_2(const V1_3::Request& request) {
}
V1_3::Request convertToV1_3(const V1_0::Request& request) {
- hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
+ hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
[](const auto& pool) { return convertToV1_3(pool); });
return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
@@ -3257,5 +3257,293 @@ uint32_t getProp(const char* str, uint32_t defaultValue) {
}
#endif // NN_DEBUGGABLE
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) {
+ return nnTryGetValue(convert(status));
+}
+
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) {
+ return nnTryGetValue(convert(status));
+}
+
+OperandType uncheckedConvert(V1_3::OperandType operandType) {
+ return nnTryGetValue(convert(operandType));
+}
+
+OperationType uncheckedConvert(V1_3::OperationType operationType) {
+ return nnTryGetValue(convert(operationType));
+}
+
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) {
+ return nnTryGetValue(convert(lifetime));
+}
+
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) {
+ return nnTryGetValue(convert(measure));
+}
+
+DataLocation uncheckedConvert(const V1_0::DataLocation& location) {
+ return nnTryGetValue(convert(location));
+}
+
+Operand uncheckedConvert(const V1_3::Operand& operand) {
+ return nnTryGetValue(convert(operand));
+}
+
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) {
+ return nnTryGetValue(convert(params));
+}
+
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) {
+ return nnTryGetValue(convert(params));
+}
+
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) {
+ return params;
+}
+
+Operation uncheckedConvert(const V1_3::Operation& operation) {
+ return nnTryGetValue(convert(operation));
+}
+
+template <typename CanonicalType, typename HalType>
+static std::vector<CanonicalType> convertVec(const hardware::hidl_vec<HalType>& items) {
+ std::vector<CanonicalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const HalType& item) { return uncheckedConvert(item); });
+ return result;
+}
+
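convertVec above is the workhorse for every hidl_vec-to-std::vector overload that follows: the explicit CanonicalType argument fixes the element type, and ordinary overload resolution picks the matching scalar uncheckedConvert. A standalone analogue of that mechanism (int and std::string stand in for the HAL and canonical types):

    #include <algorithm>
    #include <string>
    #include <vector>

    // The scalar overload that resolution will pick per element.
    std::string uncheckedConvert(int v) { return std::to_string(v); }

    template <typename Out, typename In>
    std::vector<Out> convertVec(const std::vector<In>& items) {
        std::vector<Out> result(items.size());
        std::transform(items.begin(), items.end(), result.begin(),
                       [](const In& item) { return uncheckedConvert(item); });
        return result;
    }

    // convertVec<std::string>(std::vector<int>{1, 2}) yields {"1", "2"}.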
+Model uncheckedConvert(const V1_3::Model& model) {
+ return nnTryGetValue(convert(model));
+}
+
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) {
+ return nnTryGetValue(convert(subgraph));
+}
+
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) {
+ return nnTryGetValue(convert(x));
+}
+
+Request uncheckedConvert(const V1_3::Request& request) {
+ return nnTryGetValue(convert(request));
+}
+
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) {
+ return nnTryGetValue(convert(requestArgument));
+}
+
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) {
+ return nnTryGetValue(convert(memoryPool));
+}
+
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) {
+ return nnTryGetValue(convert(outputShape));
+}
+
+std::vector<OutputShape> uncheckedConvert(
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
+ return convertVec<OutputShape>(outputShapes);
+}
+
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) {
+ return nnTryGetValue(convert(capabilities));
+}
+
+Capabilities::OperandPerformance uncheckedConvert(
+ const V1_3::Capabilities::OperandPerformance& operandPerformance) {
+ return nnTryGetValue(convert(operandPerformance));
+}
+
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
+ return nnTryGetValue(convert(performanceInfo));
+}
+
+Extension uncheckedConvert(const V1_2::Extension& extension) {
+ return nnTryGetValue(convert(extension));
+}
+
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
+ return convertVec<Extension>(extensions);
+}
+
+Extension::OperandTypeInformation uncheckedConvert(
+ const V1_2::Extension::OperandTypeInformation& info) {
+ return nnTryGetValue(convert(info));
+}
+
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
+ return nnTryGetValue(convert(timeoutDuration));
+}
+
+Timing uncheckedConvert(const V1_2::Timing& timing) {
+ return nnTryGetValue(convert(timing));
+}
+
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
+ return static_cast<V1_0::ErrorStatus>(static_cast<int>(status));
+}
+
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
+ return nnTryGetValue(V1_3::utils::convert(status));
+}
+
+V1_3::OperandType convertToV1_3(OperandType operandType) {
+ return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperationType convertToV1_3(OperationType operationType) {
+ return nnTryGetValue(V1_3::utils::convert(operationType));
+}
+
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
+ return nnTryGetValue(V1_3::utils::convert(lifetime));
+}
+
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
+ return nnTryGetValue(V1_1::utils::convert(preference));
+}
+
+V1_3::Priority convertToV1_3(Priority priority) {
+ return nnTryGetValue(V1_3::utils::convert(priority));
+}
+
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
+ return nnTryGetValue(V1_2::utils::convert(measure));
+}
+
+V1_0::DataLocation convertToV1_0(const DataLocation& location) {
+ return nnTryGetValue(V1_0::utils::convert(location));
+}
+
+V1_3::Operand convertToV1_3(const Operand& operand) {
+ return nnTryGetValue(V1_3::utils::convert(operand));
+}
+
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
+ return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
+ return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_2(const Operand::ExtensionParams& params) {
+ return params;
+}
+
+V1_3::Operation convertToV1_3(const Operation& operation) {
+ return nnTryGetValue(V1_3::utils::convert(operation));
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_0(item); });
+ return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_2(item); });
+ return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) {
+ hardware::hidl_vec<HalType> result(items.size());
+ std::transform(items.begin(), items.end(), result.begin(),
+ [](const CanonicalType& item) { return convertToV1_3(item); });
+ return result;
+}
+
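convertVecToV1_0/V1_2/V1_3 above are three near-identical templates because each convertToV1_X is an overload set, and an overload set cannot be passed as a single template argument; the per-version lambda pins the call site instead. For comparison, a sketch of the single-template alternative that takes an explicit callable (illustrative, and using std::vector so it stands alone):

    #include <algorithm>
    #include <vector>

    template <typename Out, typename In, typename F>
    std::vector<Out> mapVec(const std::vector<In>& in, F perElement) {
        std::vector<Out> out(in.size());
        std::transform(in.begin(), in.end(), out.begin(), perElement);
        return out;
    }

    // A call site would then spell the version once, e.g.
    //   mapVec<V1_3::BufferRole>(roles,
    //                            [](const BufferRole& r) { return convertToV1_3(r); });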
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) {
+ return nnTryGetValue(V1_2::utils::convert(outputShape));
+}
+
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) {
+ return convertVecToV1_2<V1_2::OutputShape>(outputShapes);
+}
+
+V1_3::Model convertToV1_3(const Model& model) {
+ return nnTryGetValue(V1_3::utils::convert(model));
+}
+
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) {
+ return nnTryGetValue(V1_3::utils::convert(subgraph));
+}
+
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) {
+ return nnTryGetValue(V1_2::utils::convert(x));
+}
+
+V1_3::Request convertToV1_3(const Request& request) {
+ return nnTryGetValue(V1_3::utils::convert(request));
+}
+
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) {
+ return nnTryGetValue(V1_0::utils::convert(requestArgument));
+}
+
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) {
+ return nnTryGetValue(V1_3::utils::convert(memoryPool));
+}
+
+std::vector<Request::MemoryPool> uncheckedConvert(
+ const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) {
+ return convertVec<Request::MemoryPool>(memoryPools);
+}
+
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) {
+ return nnTryGetValue(V1_3::utils::convert(timePoint));
+}
+
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) {
+ return nnTryGetValue(V1_3::utils::convert(timeoutDuration));
+}
+
+V1_2::Timing convertToV1_2(const Timing& timing) {
+ return nnTryGetValue(V1_2::utils::convert(timing));
+}
+
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) {
+ return nnTryGetValue(V1_3::utils::convert(bufferRole));
+}
+
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) {
+ return convertVecToV1_3<V1_3::BufferRole>(bufferRoles);
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) {
+ return nnTryGetValue(V1_0::utils::convert(operandValues));
+}
+
+hardware::hidl_memory convertToV1_0(const Memory& memory) {
+ return nnTryGetValue(V1_0::utils::convert(memory));
+}
+
+Memory uncheckedConvert(const hardware::hidl_memory& memory) {
+ return nnTryGetValue(convert(memory));
+}
+
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories) {
+ return convertVecToV1_0<hardware::hidl_memory>(memories);
+}
+
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories) {
+ return convertVec<Memory>(memories);
+}
+
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) {
+ return convertVec<Model::Subgraph>(subgraphs);
+}
+
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) {
+ return convertVec<Operand>(operands);
+}
+
} // namespace nn
} // namespace android
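A note on the pattern dominating the block above: nearly every uncheckedConvert/convertToV1_X body is nnTryGetValue(convert(...)) or nnTryGetValue(V1_X::utils::convert(...)). Both convert and nnTryGetValue are defined outside this patch; the sketch below shows only the assumed contract, a Result-like return that nnTryGetValue unwraps and that fails loudly on error, not the real implementation:

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <utility>
    #include <variant>

    template <typename T>
    using Result = std::variant<T, std::string>;  // value or error message

    template <typename T>
    T tryGetValue(Result<T> result) {
        if (auto* error = std::get_if<std::string>(&result)) {
            std::fprintf(stderr, "conversion failed: %s\n", error->c_str());
            std::abort();  // "unchecked": the caller asserts this cannot fail
        }
        return std::move(std::get<T>(result));
    }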
diff --git a/nn/common/UtilsTest.cpp b/nn/common/UtilsTest.cpp
index 291a7102a..8bc8f2468 100644
--- a/nn/common/UtilsTest.cpp
+++ b/nn/common/UtilsTest.cpp
@@ -20,16 +20,18 @@
#include <utility>
#include <vector>
+#include "HalInterfaces.h"
#include "MemoryUtils.h"
#include "OperationsUtils.cpp"
#include "QuantUtils.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
namespace wrapper {
namespace {
-using namespace hal;
using ::testing::ElementsAreArray;
} // namespace
@@ -62,9 +64,8 @@ TEST(CalculateBroadcastedShapeTest, FailsOnIncompatible) {
}
static int32_t getExtensionType(uint16_t extensionPrefix, uint16_t typeWithinExtension) {
- constexpr uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
- int32_t type = (extensionPrefix << kLowBitsType) | typeWithinExtension;
- EXPECT_TRUE(isExtensionOperandType(static_cast<OperandType>(type)));
+ int32_t type = (extensionPrefix << kExtensionTypeBits) | typeWithinExtension;
+ EXPECT_TRUE(isExtensionOperandType(static_cast<V1_3::OperandType>(type)));
return type;
}
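getExtensionType above packs an extension operand type from a 16-bit prefix and a type within the extension. Assuming kExtensionTypeBits == 16 (the constant is defined outside this hunk; 16 matches the usual NNAPI prefix/type split), a worked example:

    #include <cstdint>

    constexpr int kExtTypeBits = 16;  // assumed value of kExtensionTypeBits

    constexpr int32_t encodeExtensionType(uint16_t prefix, uint16_t type) {
        return (static_cast<int32_t>(prefix) << kExtTypeBits) | type;
    }

    // prefix 0x0001, type 0x0005 encodes to 0x00010005
    static_assert(encodeExtensionType(0x0001, 0x0005) == 0x00010005, "");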
@@ -128,7 +129,7 @@ TEST(ValidateOperandTypeTest, TensorSizeDimensionProductOverflow) {
}
TEST(ValidateRequestTest, UnknownOutputRank) {
- Request::MemoryPool pool;
+ V1_3::Request::MemoryPool pool;
pool.hidlMemory(allocateSharedMemory(2 * sizeof(float)));
ASSERT_TRUE(pool.hidlMemory().valid());
const V1_3::Model model = {
@@ -170,7 +171,7 @@ TEST(ValidateRequestTest, UnknownOutputRank) {
}
TEST(ValidateRequestTest, ScalarOutput) {
- Request::MemoryPool pool;
+ V1_3::Request::MemoryPool pool;
pool.hidlMemory(allocateSharedMemory(sizeof(float) + sizeof(int32_t)));
ASSERT_TRUE(pool.hidlMemory().valid());
const V1_3::Model model = {
diff --git a/nn/common/ValidateHal.cpp b/nn/common/ValidateHal.cpp
index 46f9b2fce..c4e5f96c0 100644
--- a/nn/common/ValidateHal.cpp
+++ b/nn/common/ValidateHal.cpp
@@ -29,12 +29,11 @@
#include "OperationsUtils.h"
#include "Tracing.h"
#include "Utils.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
-using namespace hal;
-
template <class T_Model>
struct ModelToHalVersion;
template <>
@@ -56,27 +55,27 @@ struct ModelToHalVersion<V1_3::Model> {
class MemoryAccessVerifier {
public:
- MemoryAccessVerifier(const hidl_vec<hidl_memory>& pools)
+ MemoryAccessVerifier(const hardware::hidl_vec<hardware::hidl_memory>& pools)
: mPoolCount(pools.size()), mPoolSizes(mPoolCount) {
for (size_t i = 0; i < mPoolCount; i++) {
mPoolSizes[i] = pools[i].size();
}
}
- MemoryAccessVerifier(const hidl_vec<V1_3::Request::MemoryPool>& pools)
+ MemoryAccessVerifier(const hardware::hidl_vec<V1_3::Request::MemoryPool>& pools)
: mPoolCount(pools.size()), mPoolSizes(mPoolCount) {
for (size_t i = 0; i < mPoolCount; i++) {
switch (pools[i].getDiscriminator()) {
- case Request::MemoryPool::hidl_discriminator::hidlMemory:
+ case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
mPoolSizes[i] = pools[i].hidlMemory().size();
break;
- case Request::MemoryPool::hidl_discriminator::token:
+ case V1_3::Request::MemoryPool::hidl_discriminator::token:
// Set size to 0 to enforce length == 0 && offset == 0.
mPoolSizes[i] = 0;
break;
}
}
}
- bool validate(const DataLocation& location) const {
+ bool validate(const V1_0::DataLocation& location) const {
if (location.poolIndex >= mPoolCount) {
LOG(ERROR) << "Invalid poolIndex " << location.poolIndex << "/" << mPoolCount;
return false;
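MemoryAccessVerifier::validate reduces to a range check of (offset, length) against the recorded pool size. An overflow-safe way to state that check, for reference (illustrative, not the patch's exact code):

    #include <cstdint>

    // True iff [offset, offset + length) fits in a pool of poolSize bytes;
    // written with subtraction so the sum cannot wrap around.
    bool locationInBounds(uint64_t poolSize, uint32_t offset, uint32_t length) {
        return offset <= poolSize && length <= poolSize - offset;
    }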
@@ -99,29 +98,29 @@ class MemoryAccessVerifier {
static bool validateOperandExtraParams(const V1_3::Operand& operand, uint32_t index) {
switch (operand.type) {
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_INT32:
- case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
- case OperandType::TENSOR_QUANT16_SYMM:
- case OperandType::TENSOR_BOOL8: {
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_BOOL8: {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::none)
+ V1_2::Operand::ExtraParams::hidl_discriminator::none)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type)
<< " has incorrect extraParams: " << toString(operand.extraParams);
} break;
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::channelQuant)
+ V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " without a Channel Quantization params";
auto& channelQuant = operand.extraParams.channelQuant();
@@ -151,9 +150,9 @@ static bool validateOperandExtraParams(const V1_3::Operand& operand, uint32_t in
default: {
if (isExtensionOperandType(operand.type)) {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::extension ||
+ V1_2::Operand::ExtraParams::hidl_discriminator::extension ||
operand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::none)
+ V1_2::Operand::ExtraParams::hidl_discriminator::none)
<< "Operand " << index << ": Extension operand of type "
<< getOperandTypeName(operand.type)
<< " has incorrect extraParams: " << toString(operand.extraParams);
@@ -165,10 +164,11 @@ static bool validateOperandExtraParams(const V1_3::Operand& operand, uint32_t in
}
template <typename VersionedOperand>
-static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
- const hidl_vec<uint8_t>& operandValues,
- const hidl_vec<hidl_memory>& pools,
- const hidl_vec<Subgraph>& subgraphs, bool allowUnspecifiedRank) {
+static bool validateOperands(const hardware::hidl_vec<VersionedOperand>& operands,
+ const hardware::hidl_vec<uint8_t>& operandValues,
+ const hardware::hidl_vec<hardware::hidl_memory>& pools,
+ const hardware::hidl_vec<V1_3::Subgraph>& subgraphs,
+ bool allowUnspecifiedRank) {
uint32_t index = 0;
MemoryAccessVerifier poolVerifier(pools);
for (auto& versionedOperand : operands) {
@@ -182,13 +182,13 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
V1_3::Operand operand = convertToV1_3(versionedOperand);
// Validate type and dimensions.
switch (operand.type) {
- case OperandType::FLOAT16:
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::OEM: {
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::OEM: {
size_t count = operand.dimensions.size();
if (count != 0) {
LOG(ERROR) << "Operand " << index << ": Scalar data has dimensions of rank "
@@ -197,19 +197,20 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
}
break;
}
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_INT32:
- case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
- case OperandType::TENSOR_QUANT16_SYMM:
- case OperandType::TENSOR_BOOL8:
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
- case OperandType::TENSOR_OEM_BYTE: {
- if ((!allowUnspecifiedRank || operand.lifetime == OperandLifeTime::CONSTANT_COPY ||
- operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) &&
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_OEM_BYTE: {
+ if ((!allowUnspecifiedRank ||
+ operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY ||
+ operand.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE) &&
operand.dimensions.size() == 0) {
LOG(ERROR) << "Operand " << index << ": Tensor has dimensions of rank 0";
return false;
@@ -227,16 +228,16 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
// Validate the scale.
switch (operand.type) {
- case OperandType::FLOAT16:
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_BOOL8:
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
if (operand.scale != 0.f) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-zero scale ("
@@ -244,7 +245,7 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_INT32:
// TENSOR_INT32 may be used with or without scale, depending on the operation.
if (operand.scale < 0.f) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
@@ -252,11 +253,11 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
- case OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
if (operand.scale <= 0.f) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-positive scale";
@@ -277,18 +278,18 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
// Validate the zeroPoint.
switch (operand.type) {
- case OperandType::FLOAT16:
- case OperandType::FLOAT32:
- case OperandType::INT32:
- case OperandType::UINT32:
- case OperandType::BOOL:
- case OperandType::SUBGRAPH:
- case OperandType::TENSOR_FLOAT16:
- case OperandType::TENSOR_FLOAT32:
- case OperandType::TENSOR_INT32:
- case OperandType::TENSOR_BOOL8:
- case OperandType::TENSOR_QUANT8_SYMM:
- case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
if (operand.zeroPoint != 0) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-zero zeroPoint "
@@ -296,7 +297,7 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
if (operand.zeroPoint < 0 || operand.zeroPoint > 255) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with an invalid zeroPoint "
@@ -304,7 +305,7 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
if (operand.zeroPoint < -128 || operand.zeroPoint > 127) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with an invalid zeroPoint "
@@ -312,7 +313,7 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
if (operand.zeroPoint < 0 || operand.zeroPoint > 65535) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with an invalid zeroPoint "
@@ -320,7 +321,7 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
if (operand.zeroPoint != 0) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " with a non-zero zeroPoint "
@@ -342,9 +343,9 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
NN_RET_CHECK(validateOperandExtraParams(operand, index));
// Validate the lifetime and the location.
- const DataLocation& location = operand.location;
+ const V1_0::DataLocation& location = operand.location;
switch (operand.lifetime) {
- case OperandLifeTime::CONSTANT_COPY:
+ case V1_3::OperandLifeTime::CONSTANT_COPY:
if (location.poolIndex != 0) {
LOG(ERROR) << "Operand " << index
<< ": CONSTANT_COPY with a non-zero poolIndex "
@@ -360,15 +361,15 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandLifeTime::CONSTANT_REFERENCE:
+ case V1_3::OperandLifeTime::CONSTANT_REFERENCE:
if (!poolVerifier.validate(location)) {
return false;
}
break;
- case OperandLifeTime::TEMPORARY_VARIABLE:
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
- case OperandLifeTime::NO_VALUE:
+ case V1_3::OperandLifeTime::TEMPORARY_VARIABLE:
+ case V1_3::OperandLifeTime::SUBGRAPH_INPUT:
+ case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT:
+ case V1_3::OperandLifeTime::NO_VALUE:
if (location.poolIndex != 0 || location.offset != 0 || location.length != 0) {
LOG(ERROR) << "Operand " << index << ": Unexpected poolIndex "
<< location.poolIndex << ", offset " << location.offset
@@ -377,14 +378,14 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
return false;
}
break;
- case OperandLifeTime::SUBGRAPH: {
+ case V1_3::OperandLifeTime::SUBGRAPH: {
if (location.poolIndex != 0) {
LOG(ERROR) << "Operand " << index << ": SUBGRAPH with a non-zero poolIndex "
<< location.poolIndex;
return false;
}
if (location.offset >= subgraphs.size()) {
- LOG(ERROR) << "Subgraph index out of range: " << location.offset
+ LOG(ERROR) << "Model::Subgraph index out of range: " << location.offset
<< " >= " << subgraphs.size();
return false;
}
@@ -401,8 +402,8 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
}
// Make sure SUBGRAPH operand type and lifetime always go together.
- if ((operand.type == OperandType::SUBGRAPH) !=
- (operand.lifetime == OperandLifeTime::SUBGRAPH)) {
+ if ((operand.type == V1_3::OperandType::SUBGRAPH) !=
+ (operand.lifetime == V1_3::OperandLifeTime::SUBGRAPH)) {
LOG(ERROR) << "Operand " << index << ": Operand of type " << toString(operand.type)
<< " cannot have lifetime " << toString(operand.lifetime);
return false;
@@ -410,10 +411,10 @@ static bool validateOperands(const hidl_vec<VersionedOperand>& operands,
// For constants, validate that the length is as expected. The other lifetimes
// expect the length to be 0. Don't validate for OEM types.
- if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
- operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
- if (!isExtensionOperandType(operand.type) && operand.type != OperandType::OEM &&
- operand.type != OperandType::TENSOR_OEM_BYTE) {
+ if (operand.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE ||
+ operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY) {
+ if (!isExtensionOperandType(operand.type) && operand.type != V1_3::OperandType::OEM &&
+ operand.type != V1_3::OperandType::TENSOR_OEM_BYTE) {
uint32_t expectedLength = nonExtensionOperandSizeOfData(operand);
if (location.length != expectedLength) {
LOG(ERROR) << "Operand " << index << ": For operand " << toString(operand)
@@ -446,19 +447,22 @@ static HalVersion getHalVersion(const V1_3::Operation&) {
}
template <typename VersionedOperation>
-static bool validateOperations(const hidl_vec<VersionedOperation>& operations,
- const hidl_vec<Operand>& operands,
- const hidl_vec<Subgraph>& subgraphs, ValidationMode mode) {
- auto isValidSubgraphReference = [&subgraphs](const Operand& modelOperand) -> bool {
+static bool validateOperations(const hardware::hidl_vec<VersionedOperation>& operations,
+ const hardware::hidl_vec<V1_3::Operand>& operands,
+ const hardware::hidl_vec<V1_3::Subgraph>& subgraphs,
+ ValidationMode mode) {
+ auto canonicalSubgraphs = uncheckedConvert(subgraphs);
+ auto isValidSubgraphReference = [&canonicalSubgraphs](const Operand& modelOperand) -> bool {
NN_RET_CHECK(modelOperand.type == OperandType::SUBGRAPH)
- << "Unexpected operand type: " << toString(modelOperand.type);
- NN_RET_CHECK_LT(modelOperand.location.offset, subgraphs.size())
+ << "Unexpected operand type: " << modelOperand.type;
+ NN_RET_CHECK_LT(modelOperand.location.offset, canonicalSubgraphs.size())
<< "Invalid subgraph reference";
return true;
};
- auto getSubgraph = [&subgraphs](const Operand& modelOperand) -> const Subgraph* {
- CHECK_LT(modelOperand.location.offset, subgraphs.size());
- return &subgraphs[modelOperand.location.offset];
+ auto getSubgraph =
+ [&canonicalSubgraphs](const Operand& modelOperand) -> const Model::Subgraph* {
+ CHECK_LT(modelOperand.location.offset, canonicalSubgraphs.size());
+ return &canonicalSubgraphs[modelOperand.location.offset];
};
auto getInputCount = [&getSubgraph](const Operand& modelOperand) -> uint32_t {
return getSubgraph(modelOperand)->inputIndexes.size();
@@ -468,32 +472,33 @@ static bool validateOperations(const hidl_vec<VersionedOperation>& operations,
};
auto getInputOperand = [&getSubgraph](const Operand& modelOperand,
uint32_t index) -> const Operand* {
- const Subgraph& subgraph = *getSubgraph(modelOperand);
+ const Model::Subgraph& subgraph = *getSubgraph(modelOperand);
CHECK_LT(subgraph.inputIndexes[index], subgraph.operands.size());
return &subgraph.operands[subgraph.inputIndexes[index]];
};
auto getOutputOperand = [&getSubgraph](const Operand& modelOperand,
uint32_t index) -> const Operand* {
- const Subgraph& subgraph = *getSubgraph(modelOperand);
+ const Model::Subgraph& subgraph = *getSubgraph(modelOperand);
CHECK_LT(subgraph.outputIndexes[index], subgraph.operands.size());
return &subgraph.operands[subgraph.outputIndexes[index]];
};
for (auto& op : operations) {
// TODO Validate the shapes and any known values. This is currently
// done in CpuExecutor but should be done here for all drivers.
- int error = validateOperation(
- static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands, getHalVersion(op),
- {.isValidSubgraphReference = isValidSubgraphReference,
- .getSubgraphInputCount = getInputCount,
- .getSubgraphOutputCount = getOutputCount,
- .getSubgraphInputOperand = getInputOperand,
- .getSubgraphOutputOperand = getOutputOperand,
- // 1.3 HAL does not support CF operations with operands of
- // unknown size. See http://b/132458982#comment63.
- .allowControlFlowOperationWithOperandOfUnknownSize =
- mode == ValidationMode::RUNTIME});
+ int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+ uncheckedConvert(operands), getHalVersion(op),
+ {.isValidSubgraphReference = isValidSubgraphReference,
+ .getSubgraphInputCount = getInputCount,
+ .getSubgraphOutputCount = getOutputCount,
+ .getSubgraphInputOperand = getInputOperand,
+ .getSubgraphOutputOperand = getOutputOperand,
+ // 1.3 HAL does not support CF operations with operands of
+ // unknown size. See http://b/132458982#comment63.
+ .allowControlFlowOperationWithOperandOfUnknownSize =
+ mode == ValidationMode::RUNTIME});
if (error != ANEURALNETWORKS_NO_ERROR) {
LOG(ERROR) << "Invalid operation " << toString(op.type);
return false;
@@ -503,9 +508,9 @@ static bool validateOperations(const hidl_vec<VersionedOperation>& operations,
// but it is retained here in order to emit more informative
// error messages.
for (uint32_t i : op.outputs) {
- const Operand& operand = operands[i];
- if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE &&
- operand.lifetime != OperandLifeTime::SUBGRAPH_OUTPUT) {
+ const V1_3::Operand& operand = operands[i];
+ if (operand.lifetime != V1_3::OperandLifeTime::TEMPORARY_VARIABLE &&
+ operand.lifetime != V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) {
LOG(ERROR) << "Writing to operand " << i << " with incompatible lifetime "
<< toString(operand.lifetime);
return false;
@@ -515,7 +520,7 @@ static bool validateOperations(const hidl_vec<VersionedOperation>& operations,
return true;
}
-bool validatePool(const hidl_memory& pool, HalVersion ver) {
+bool validatePool(const hardware::hidl_memory& pool, HalVersion ver) {
const auto& name = pool.name();
if (name != "ashmem" && name != "mmap_fd" &&
((ver < HalVersion::V1_2) ||
@@ -532,9 +537,9 @@ bool validatePool(const hidl_memory& pool, HalVersion ver) {
bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver) {
switch (pool.getDiscriminator()) {
- case Request::MemoryPool::hidl_discriminator::hidlMemory:
+ case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
return validatePool(pool.hidlMemory(), ver);
- case Request::MemoryPool::hidl_discriminator::token:
+ case V1_3::Request::MemoryPool::hidl_discriminator::token:
return pool.token() > 0;
}
LOG(FATAL) << "unknown MemoryPool discriminator";
@@ -542,20 +547,21 @@ bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver) {
}
template <class T_MemoryPool>
-static bool validatePools(const hidl_vec<T_MemoryPool>& pools, HalVersion ver) {
+static bool validatePools(const hardware::hidl_vec<T_MemoryPool>& pools, HalVersion ver) {
return std::all_of(pools.begin(), pools.end(),
[ver](const auto& pool) { return validatePool(pool, ver); });
}
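// Sketch (illustrative, not from the patch): validatePools() folds the
// matching validatePool() overload over every pool, so a V1_3 caller
// (the request variable is hypothetical) might do:
//
//   if (!validatePools(request.pools, HalVersion::V1_3)) {
//       LOG(ERROR) << "Invalid request memory pools";
//       return false;
//   }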
-static bool validateModelInputOutputs(const hidl_vec<uint32_t> indexes,
- const hidl_vec<Operand>& operands, OperandLifeTime lifetime) {
+static bool validateModelInputOutputs(const hardware::hidl_vec<uint32_t> indexes,
+ const hardware::hidl_vec<V1_3::Operand>& operands,
+ V1_3::OperandLifeTime lifetime) {
const size_t operandCount = operands.size();
for (uint32_t i : indexes) {
if (i >= operandCount) {
LOG(ERROR) << "Model input or output index out of range: " << i << "/" << operandCount;
return false;
}
- const Operand& operand = operands[i];
+ const V1_3::Operand& operand = operands[i];
if (operand.lifetime != lifetime) {
LOG(ERROR) << "Model input or output operand " << i << " has lifetime of "
<< toString(operand.lifetime) << " instead of the expected "
@@ -596,12 +602,12 @@ static bool validateGraph(const VersionedModelOrSubgraph& model) {
// mark known operands
for (size_t i = 0; i < model.operands.size(); ++i) {
const auto& operand = model.operands[i];
- const OperandLifeTime lifetime = convertToV1_3(operand.lifetime);
- operandValueKnown[i] = lifetime == OperandLifeTime::SUBGRAPH_INPUT ||
- lifetime == OperandLifeTime::CONSTANT_COPY ||
- lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
- lifetime == OperandLifeTime::NO_VALUE ||
- lifetime == OperandLifeTime::SUBGRAPH;
+ const V1_3::OperandLifeTime lifetime = convertToV1_3(operand.lifetime);
+ operandValueKnown[i] = lifetime == V1_3::OperandLifeTime::SUBGRAPH_INPUT ||
+ lifetime == V1_3::OperandLifeTime::CONSTANT_COPY ||
+ lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE ||
+ lifetime == V1_3::OperandLifeTime::NO_VALUE ||
+ lifetime == V1_3::OperandLifeTime::SUBGRAPH;
}
// Validate that operations are sorted into execution order.
@@ -672,8 +678,8 @@ static bool checkNoReferenceCycles(const V1_3::Model& model, const V1_3::Subgrap
LOG(ERROR) << "Model contains a circular subgraph reference";
return false;
}
- for (const Operand& operand : subgraph.operands) {
- if (operand.lifetime == OperandLifeTime::SUBGRAPH) {
+ for (const V1_3::Operand& operand : subgraph.operands) {
+ if (operand.lifetime == V1_3::OperandLifeTime::SUBGRAPH) {
uint32_t refSubgraphIndex = operand.location.offset;
if (!checkNoReferenceCycles(model, model.referenced[refSubgraphIndex], path)) {
return false;
@@ -699,14 +705,14 @@ bool validateModel(const T_Model& model, ValidationMode mode) {
}
// We only need versioned operands for their validation. For all the other
// validations we can use operands upcast to the latest version.
- const hidl_vec<Operand> latestVersionOperands = convertToV1_3(model.operands);
+ const hardware::hidl_vec<V1_3::Operand> latestVersionOperands = convertToV1_3(model.operands);
return (validateOperands(model.operands, model.operandValues, model.pools, /*subgraphs=*/{},
/*allowUnspecifiedRank=*/version >= HalVersion::V1_2) &&
validateOperations(model.operations, latestVersionOperands, /*subgraphs=*/{}, mode) &&
validateModelInputOutputs(model.inputIndexes, latestVersionOperands,
- OperandLifeTime::SUBGRAPH_INPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_INPUT) &&
validateModelInputOutputs(model.outputIndexes, latestVersionOperands,
- OperandLifeTime::SUBGRAPH_OUTPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) &&
validatePools(model.pools, version) && validateGraph(model));
}
@@ -721,15 +727,15 @@ bool validateModel(const V1_3::Model& model, ValidationMode mode) {
LOG(ERROR) << "Invalid empty model.";
return false;
}
- auto validateSubgraph = [&model, mode](const Subgraph& subgraph) -> bool {
+ auto validateSubgraph = [&model, mode](const V1_3::Subgraph& subgraph) -> bool {
return (validateOperands(subgraph.operands, model.operandValues, model.pools,
model.referenced, /*allowUnspecifiedRank=*/true) &&
validateOperations(subgraph.operations, subgraph.operands, model.referenced,
mode) &&
validateModelInputOutputs(subgraph.inputIndexes, subgraph.operands,
- OperandLifeTime::SUBGRAPH_INPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_INPUT) &&
validateModelInputOutputs(subgraph.outputIndexes, subgraph.operands,
- OperandLifeTime::SUBGRAPH_OUTPUT) &&
+ V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) &&
validateGraph(subgraph));
};
return (validateSubgraph(model.main) &&
@@ -740,11 +746,11 @@ bool validateModel(const V1_3::Model& model, ValidationMode mode) {
// Validates the arguments of a request. type is either "input" or "output" and is used
// for printing error messages. The operandIndexes is the appropriate array of input
// or output operand indexes that was passed to the ANeuralNetworksModel_identifyInputsAndOutputs.
-static bool validateRequestArguments(const hidl_vec<RequestArgument>& requestArguments,
- const hidl_vec<uint32_t>& operandIndexes,
- const hidl_vec<Operand>& operands,
- const MemoryAccessVerifier& poolVerifier,
- bool allowUnspecified, const char* type) {
+static bool validateRequestArguments(
+ const hardware::hidl_vec<V1_0::RequestArgument>& requestArguments,
+ const hardware::hidl_vec<uint32_t>& operandIndexes,
+ const hardware::hidl_vec<V1_3::Operand>& operands, const MemoryAccessVerifier& poolVerifier,
+ bool allowUnspecified, const char* type) {
// The request should specify as many arguments as were described in the model.
const size_t requestArgumentCount = requestArguments.size();
if (requestArgumentCount != operandIndexes.size()) {
@@ -754,13 +760,13 @@ static bool validateRequestArguments(const hidl_vec<RequestArgument>& requestArg
}
for (size_t requestArgumentIndex = 0; requestArgumentIndex < requestArgumentCount;
requestArgumentIndex++) {
- const RequestArgument& requestArgument = requestArguments[requestArgumentIndex];
- const DataLocation& location = requestArgument.location;
+ const V1_0::RequestArgument& requestArgument = requestArguments[requestArgumentIndex];
+ const V1_0::DataLocation& location = requestArgument.location;
// Get the operand index for this argument. We extract it from the list
// that was provided in the call to ANeuralNetworksModel_identifyInputsAndOutputs.
// We assume in this function that the model has been validated already.
const uint32_t operandIndex = operandIndexes[requestArgumentIndex];
- const Operand& operand = operands[operandIndex];
+ const V1_3::Operand& operand = operands[operandIndex];
if (requestArgument.hasNoValue) {
if (location.poolIndex != 0 || location.offset != 0 || location.length != 0 ||
requestArgument.dimensions.size() != 0) {
@@ -861,9 +867,9 @@ bool validateRequest(const V1_3::Request& request, const V1_3::Model& model,
}
bool validateMemoryDesc(const V1_3::BufferDesc& desc,
- const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<V1_3::BufferRole>& inputRoles,
- const hidl_vec<V1_3::BufferRole>& outputRoles,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
std::set<PreparedModelRole>* preparedModelRoles,
V1_3::Operand* combinedOperand) {
@@ -939,14 +945,15 @@ bool validateMemoryDesc(const V1_3::BufferDesc& desc,
return true;
}
-bool validateExecutionPreference(ExecutionPreference preference) {
- return preference == ExecutionPreference::LOW_POWER ||
- preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
- preference == ExecutionPreference::SUSTAINED_SPEED;
+bool validateExecutionPreference(V1_1::ExecutionPreference preference) {
+ return preference == V1_1::ExecutionPreference::LOW_POWER ||
+ preference == V1_1::ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == V1_1::ExecutionPreference::SUSTAINED_SPEED;
}
-bool validatePriority(Priority priority) {
- return priority == Priority::LOW || priority == Priority::MEDIUM || priority == Priority::HIGH;
+bool validatePriority(V1_3::Priority priority) {
+ return priority == V1_3::Priority::LOW || priority == V1_3::Priority::MEDIUM ||
+ priority == V1_3::Priority::HIGH;
}
bool validOperandType(V1_0::OperandType operandType) {
diff --git a/nn/common/include/BufferTracker.h b/nn/common/include/BufferTracker.h
index feabda643..60432caaf 100644
--- a/nn/common/include/BufferTracker.h
+++ b/nn/common/include/BufferTracker.h
@@ -37,23 +37,23 @@ namespace android::nn {
class ManagedBuffer {
public:
static std::shared_ptr<ManagedBuffer> create(uint32_t size, std::set<PreparedModelRole> roles,
- const hal::Operand& operand);
+ const Operand& operand);
// Prefer ManagedBuffer::create.
ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
- std::set<PreparedModelRole> roles, const hal::Operand& operand);
+ std::set<PreparedModelRole> roles, const Operand& operand);
RunTimePoolInfo createRunTimePoolInfo() const {
return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize);
}
// "poolIndex" is the index of this buffer in the request.pools.
- hal::ErrorStatus validateRequest(uint32_t poolIndex, const hal::Request& request,
- const hal::IPreparedModel* preparedModel) const;
+ ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
+ const V1_3::IPreparedModel* preparedModel) const;
// "size" is the byte size of the hidl_memory provided to the copyFrom or copyTo method.
- hal::ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
- hal::ErrorStatus validateCopyTo(uint32_t size) const;
+ ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
+ ErrorStatus validateCopyTo(uint32_t size) const;
bool updateDimensions(const std::vector<uint32_t>& dimensions);
void setInitialized(bool initialized);
@@ -63,7 +63,7 @@ class ManagedBuffer {
const std::unique_ptr<uint8_t[]> kBuffer;
const uint32_t kSize;
const std::set<PreparedModelRole> kRoles;
- const hal::OperandType kOperandType;
+ const OperandType kOperandType;
const std::vector<uint32_t> kInitialDimensions;
std::vector<uint32_t> mUpdatedDimensions;
bool mInitialized = false;
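// Sketch of the ManagedBuffer lifecycle declared above (size, roles, and
// operand are illustrative values; create() returning nullptr on failure is
// an assumption consistent with the factory pattern used here):
//
//   std::shared_ptr<ManagedBuffer> buffer =
//           ManagedBuffer::create(size, std::move(roles), operand);
//   if (buffer == nullptr) { /* report V1_3::ErrorStatus::GENERAL_FAILURE */ }
//   RunTimePoolInfo poolInfo = buffer->createRunTimePoolInfo();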
diff --git a/nn/common/include/CpuExecutor.h b/nn/common/include/CpuExecutor.h
index edb233217..094572921 100644
--- a/nn/common/include/CpuExecutor.h
+++ b/nn/common/include/CpuExecutor.h
@@ -25,10 +25,10 @@
#include <vector>
#include "ControlFlow.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -37,7 +37,7 @@ namespace nn {
// may change during execution.
struct RunTimeOperandInfo {
// TODO Storing the type here is redundant, as it won't change during execution.
- hal::OperandType type;
+ OperandType type;
// The type and dimensions of the operand. The dimensions can
// change at runtime. We include the type because it's useful
// to pass together with the dimension to the functions implementing
@@ -64,14 +64,14 @@ struct RunTimeOperandInfo {
// The length of the buffer.
uint32_t length;
// Whether this is a temporary variable, a model input, a constant, etc.
- hal::OperandLifeTime lifetime;
+ Operand::LifeTime lifetime;
// Keeps track of how many operations have yet to make use
// of this temporary variable. When the count is decremented to 0,
// we free the buffer. For non-temporary variables, this count is
// always 0.
uint32_t numberOfUsesLeft;
- hal::OperandExtraParams extraParams;
+ Operand::ExtraParams extraParams;
Shape shape() const {
return {
@@ -84,7 +84,7 @@ struct RunTimeOperandInfo {
}
bool isSufficient() const {
- if (isExtensionOperandType(type)) {
+ if (isExtension(type)) {
// We don't know sizes of extension types.
return true;
}
@@ -98,19 +98,20 @@ struct RunTimeOperandInfo {
// may reference the same region of memory by either:
// (1) copying an existing RunTimePoolInfo object, or
// (2) creating multiple RunTimePoolInfo objects from the same memory resource
-// (e.g., "createFromHidlMemory" or "createFromExistingBuffer")
+// (e.g., "createFromMemory" or "createFromExistingBuffer")
//
-// If the underlying region of memory is mapped by "createFromHidlMemory", the
+// If the underlying region of memory is mapped by "createFromMemory", the
// mapping will be sustained until it is no longer referenced by any
// RunTimePoolInfo objects.
class RunTimePoolInfo {
public:
- static std::optional<RunTimePoolInfo> createFromHidlMemory(const hal::hidl_memory& hidlMemory);
+ static std::optional<RunTimePoolInfo> createFromMemory(const Memory& memory);
static RunTimePoolInfo createFromExistingBuffer(uint8_t* buffer, uint32_t size = 0);
uint8_t* getBuffer() const;
bool flush() const;
- const hal::hidl_memory& getHidlMemory() const;
+ // TODO(b/169672209): "const Memory& getMemory() const;"
+ Memory getMemory() const;
uint32_t getSize() const;
private:
@@ -120,11 +121,20 @@ class RunTimePoolInfo {
std::shared_ptr<const RunTimePoolInfoImpl> mImpl;
};
-bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
- const hal::hidl_vec<hal::hidl_memory>& pools);
+bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,
+ const std::vector<Memory>& pools);
+
+// DEPRECATED. Use setRunTimePoolInfosFromCanonicalMemories().
+//
+// Used by external code.
+inline bool setRunTimePoolInfosFromHidlMemories(
+ std::vector<RunTimePoolInfo>* poolInfos,
+ const hardware::hidl_vec<hardware::hidl_memory>& pools) {
+ return setRunTimePoolInfosFromCanonicalMemories(poolInfos, uncheckedConvert(pools));
+}
bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos,
- const hal::hidl_vec<hal::Request::MemoryPool>& pools);
+ const std::vector<Request::MemoryPool>& pools);
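// Sketch (illustrative, not from the patch): both setters fill one
// RunTimePoolInfo per pool and return false if any pool fails to map;
// a caller might handle that as follows:
//
//   std::vector<RunTimePoolInfo> requestPoolInfos;
//   if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, request.pools)) {
//       return ANEURALNETWORKS_UNMAPPABLE;
//   }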
// This class is used to execute a model on the CPU.
class CpuExecutor {
@@ -146,11 +156,11 @@ class CpuExecutor {
// specified in the constructor.
// The model must outlive the executor. We prevent it from being modified
// while this is executing.
- int run(const hal::Model& model, const hal::Request& request,
+ int run(const Model& model, const Request& request,
const std::vector<RunTimePoolInfo>& modelPoolInfos,
const std::vector<RunTimePoolInfo>& requestPoolInfos);
- const std::vector<hal::OutputShape>& getOutputShapes() const {
+ const std::vector<OutputShape>& getOutputShapes() const {
CHECK(mFinished) << "getOutputShapes() called by an unfinished CpuExecutor.";
return mOutputShapes;
}
@@ -160,31 +170,31 @@ class CpuExecutor {
private:
// Creates runtime info from what's in the model.
- std::vector<RunTimeOperandInfo> initializeRunTimeInfo(const hal::Subgraph& subgraph);
+ std::vector<RunTimeOperandInfo> initializeRunTimeInfo(const Model::Subgraph& subgraph);
// Adjusts the runtime info for the arguments passed to the model,
// modifying the buffer location, and possibly the dimensions.
void updateForArguments(const std::vector<uint32_t>& indexes,
- const hal::hidl_vec<hal::RequestArgument>& arguments,
+ const std::vector<Request::Argument>& arguments,
const std::vector<RunTimePoolInfo>& requestPoolInfos,
RunTimeOperandInfo* operands);
// Runs one subgraph.
- int executeSubgraph(const hal::Subgraph& subgraph, RunTimeOperandInfo* operands);
+ int executeSubgraph(const Model::Subgraph& subgraph, RunTimeOperandInfo* operands);
// Runs one operation of the graph.
- int executeOperation(const hal::Operation& operation, RunTimeOperandInfo* operands);
- int executeIfOperation(const hal::Operation& operation, RunTimeOperandInfo* operands);
- int executeWhileOperation(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ int executeOperation(const Operation& operation, RunTimeOperandInfo* operands);
+ int executeIfOperation(const Operation& operation, RunTimeOperandInfo* operands);
+ int executeWhileOperation(const Operation& operation, RunTimeOperandInfo* operands);
void setOutputShapes(const std::vector<uint32_t>& outputIndexes,
const std::vector<RunTimeOperandInfo>& operands);
// Compile-time operand value information used by initializeRunTimeInfo.
// The fields are only valid while run() is being executed.
- const hal::hidl_vec<uint8_t>* mModelOperandValues = nullptr;
+ const uint8_t* mModelOperandValues = nullptr;
const std::vector<RunTimePoolInfo>* mModelPoolInfos = nullptr;
- const hal::hidl_vec<hal::Subgraph>* mReferencedSubgraphs = nullptr;
+ const std::vector<Model::Subgraph>* mReferencedSubgraphs = nullptr;
// The output operand shapes returning to the runtime.
- std::vector<hal::OutputShape> mOutputShapes;
+ std::vector<OutputShape> mOutputShapes;
// Whether execution is finished and mOutputShapes is ready
bool mFinished = false;
@@ -259,17 +269,16 @@ T getScalarDataWithDefault(const RunTimeOperandInfo& info, T defaultValue) {
}
inline bool IsNullInput(const RunTimeOperandInfo* input) {
- return input->lifetime == hal::OperandLifeTime::NO_VALUE;
+ return input->lifetime == Operand::LifeTime::NO_VALUE;
}
-inline int NumInputsWithValues(const hal::Operation& operation,
- const RunTimeOperandInfo* operands) {
+inline int NumInputsWithValues(const Operation& operation, const RunTimeOperandInfo* operands) {
const std::vector<uint32_t>& inputs = operation.inputs;
return std::count_if(inputs.begin(), inputs.end(),
[&operands](uint32_t i) { return !IsNullInput(&operands[i]); });
}
-inline int NumOutputs(const hal::Operation& operation) {
+inline int NumOutputs(const Operation& operation) {
return operation.outputs.size();
}
@@ -281,12 +290,12 @@ inline uint32_t SizeOfDimension(const RunTimeOperandInfo* operand, int i) {
return operand->shape().dimensions[i];
}
-inline RunTimeOperandInfo* GetInput(const hal::Operation& operation, RunTimeOperandInfo* operands,
+inline RunTimeOperandInfo* GetInput(const Operation& operation, RunTimeOperandInfo* operands,
int index) {
return &operands[operation.inputs[index]];
}
-inline RunTimeOperandInfo* GetOutput(const hal::Operation& operation, RunTimeOperandInfo* operands,
+inline RunTimeOperandInfo* GetOutput(const Operation& operation, RunTimeOperandInfo* operands,
int index) {
return &operands[operation.outputs[index]];
}
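// Sketch (illustrative): a typical operation implementation fetches its
// operands through these helpers (operand index 0 is an arbitrary example):
//
//   const RunTimeOperandInfo* input = GetInput(operation, operands, 0);
//   RunTimeOperandInfo* output = GetOutput(operation, operands, 0);
//   if (IsNullInput(input)) return ANEURALNETWORKS_BAD_DATA;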
diff --git a/nn/common/include/GraphDump.h b/nn/common/include/GraphDump.h
index 207afe507..208b4ec18 100644
--- a/nn/common/include/GraphDump.h
+++ b/nn/common/include/GraphDump.h
@@ -17,10 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H
-#include <android/hardware/neuralnetworks/1.3/types.h>
-
#include <iostream>
+#include "nnapi/Types.h"
+
namespace android {
namespace nn {
@@ -45,8 +45,7 @@ namespace nn {
// A model input or output (operand) is shown in "reverse colors" --
// white text on a black background.
//
-void graphDump(const char* name, const ::android::hardware::neuralnetworks::V1_3::Model& model,
- std::ostream* outStream = nullptr);
+void graphDump(const char* name, const Model& model, std::ostream* outStream = nullptr);
} // namespace nn
} // namespace android
diff --git a/nn/common/include/HalInterfaces.h b/nn/common/include/HalInterfaces.h
index 4e3a3800b..8eeb23d6d 100644
--- a/nn/common/include/HalInterfaces.h
+++ b/nn/common/include/HalInterfaces.h
@@ -40,74 +40,20 @@
#include <functional>
-namespace android::nn::hal {
+namespace android::nn {
-using android::sp;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
-using hardware::hidl_death_recipient;
-using hardware::hidl_enum_range;
-using hardware::hidl_handle;
-using hardware::hidl_memory;
-using hardware::hidl_string;
-using hardware::hidl_vec;
-using hardware::Return;
-using hardware::Void;
+using HalCacheToken =
+ hardware::hidl_array<uint8_t,
+ static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using HalDeviceFactory = std::function<sp<V1_0::IDevice>(bool blocking)>;
-using hidl::memory::V1_0::IMemory;
+inline constexpr V1_3::Priority kDefaultPriority13 = V1_3::Priority::MEDIUM;
-namespace V1_0 = hardware::neuralnetworks::V1_0;
-namespace V1_1 = hardware::neuralnetworks::V1_1;
-namespace V1_2 = hardware::neuralnetworks::V1_2;
-namespace V1_3 = hardware::neuralnetworks::V1_3;
-
-using V1_0::DataLocation;
-using V1_0::DeviceStatus;
-using V1_0::FusedActivationFunc;
-using V1_0::PerformanceInfo;
-using V1_0::RequestArgument;
-using V1_1::ExecutionPreference;
-using V1_2::Constant;
-using V1_2::DeviceType;
-using V1_2::Extension;
-using V1_2::MeasureTiming;
-using V1_2::OutputShape;
-using V1_2::SymmPerChannelQuantParams;
-using V1_2::Timing;
-using V1_3::BufferDesc;
-using V1_3::BufferRole;
-using V1_3::Capabilities;
-using V1_3::ErrorStatus;
-using V1_3::IBuffer;
-using V1_3::IDevice;
-using V1_3::IExecutionCallback;
-using V1_3::IFencedExecutionCallback;
-using V1_3::IPreparedModel;
-using V1_3::IPreparedModelCallback;
-using V1_3::LoopTimeoutDurationNs;
-using V1_3::Model;
-using V1_3::Operand;
-using V1_3::OperandLifeTime;
-using V1_3::OperandType;
-using V1_3::OperandTypeRange;
-using V1_3::Operation;
-using V1_3::OperationType;
-using V1_3::OperationTypeRange;
-using V1_3::OptionalTimeoutDuration;
-using V1_3::OptionalTimePoint;
-using V1_3::Priority;
-using V1_3::Request;
-using V1_3::Subgraph;
-using ExtensionNameAndPrefix = V1_2::Model::ExtensionNameAndPrefix;
-using ExtensionTypeEncoding = V1_2::Model::ExtensionTypeEncoding;
-using OperandExtraParams = V1_2::Operand::ExtraParams;
-
-using CacheToken =
- hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
-using DeviceFactory = std::function<sp<V1_0::IDevice>(bool blocking)>;
-using ModelFactory = std::function<Model()>;
-
-inline constexpr Priority kDefaultPriority = Priority::MEDIUM;
-
-} // namespace android::nn::hal
+} // namespace android::nn
#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_INTERFACES_H
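// Sketch of the resulting style (not from the patch): unqualified names inside
// android::nn now refer to canonical types, while HAL types must always spell
// out their version namespace:
//
//   Operand canonical;     // canonical type from nnapi/Types.h
//   V1_3::Operand hal;     // HIDL type, version explicit
//   V1_3::OperandLifeTime halLifetime = V1_3::OperandLifeTime::SUBGRAPH_INPUT;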
diff --git a/nn/common/include/MetaModel.h b/nn/common/include/MetaModel.h
index 154a453fd..3cb87f31d 100644
--- a/nn/common/include/MetaModel.h
+++ b/nn/common/include/MetaModel.h
@@ -17,9 +17,8 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_META_MODEL_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_META_MODEL_H
-#include "HalInterfaces.h"
-
#include <android-base/macros.h>
+
#include <functional>
#include <map>
#include <optional>
@@ -27,6 +26,10 @@
#include <utility>
#include <vector>
+#include "HalInterfaces.h"
+#include "Utils.h"
+#include "nnapi/Types.h"
+
namespace android::nn {
// The MetaModel class encapsulates a Model and provides machinery to create
@@ -67,14 +70,15 @@ class MetaModel {
template <class T_Model>
using ReturnedSlice = std::optional<std::pair<T_Model, Mapper>>;
- MetaModel(hal::Model model, bool strictSlicing)
- : mHidlModel(std::move(model)), mStrictSlicing(strictSlicing) {}
+ MetaModel(Model model, bool strictSlicing)
+ : mModel(std::move(model)), mStrictSlicing(strictSlicing) {}
- const hal::Model& getModel() const { return mHidlModel; }
+ const Model& getModel() const { return mModel; }
- ReturnedSlice<hal::V1_0::Model> getSliceV1_0() const { return getSlice(&mSliceV1_0); }
- ReturnedSlice<hal::V1_1::Model> getSliceV1_1() const { return getSlice(&mSliceV1_1); }
- ReturnedSlice<hal::V1_2::Model> getSliceV1_2() const { return getSlice(&mSliceV1_2); }
+ ReturnedSlice<V1_0::Model> getSliceV1_0() const { return getSlice(&mSliceV1_0); }
+ ReturnedSlice<V1_1::Model> getSliceV1_1() const { return getSlice(&mSliceV1_1); }
+ ReturnedSlice<V1_2::Model> getSliceV1_2() const { return getSlice(&mSliceV1_2); }
+ ReturnedSlice<V1_3::Model> getSliceV1_3() const { return getSlice(&mSliceV1_3); }
// Disallowing copy constructor and assignment operator is for efficiency,
// not for correctness. The default copy constructor and assignment
@@ -92,7 +96,7 @@ class MetaModel {
MetaModel& operator=(MetaModel&&) = default;
private:
- hal::Model mHidlModel;
+ Model mModel;
// mStrictSlicing controls validity checking. If the slicing algorithm
// produces an invalid model (because something has gone wrong with the
@@ -114,12 +118,20 @@ class MetaModel {
using Operation = typename decltype(mHidlModel.operations)::value_type;
using OperationType = decltype(Operation::type);
};
- mutable Slice<hal::V1_0::Model> mSliceV1_0;
- mutable Slice<hal::V1_1::Model> mSliceV1_1;
- mutable Slice<hal::V1_2::Model> mSliceV1_2;
+ template <>
+ struct Slice<V1_3::Model> { // Trivial slice.
+ SliceState mState = SliceState::UNINITIALIZED;
+ V1_3::Model mHidlModel;
+ };
+ mutable Slice<V1_0::Model> mSliceV1_0;
+ mutable Slice<V1_1::Model> mSliceV1_1;
+ mutable Slice<V1_2::Model> mSliceV1_2;
+ mutable Slice<V1_3::Model> mSliceV1_3;
template <class T_SlicedModel>
ReturnedSlice<T_SlicedModel> getSlice(Slice<T_SlicedModel>* slice) const;
+ template <>
+ ReturnedSlice<V1_3::Model> getSlice(Slice<V1_3::Model>* slice) const;
template <class T_SlicedModel>
Slice<T_SlicedModel> makeSlice() const;
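// Sketch (illustrative, assuming Mapper translates sliced operation indices
// back to indices in the original model): getSliceV1_2() lazily computes and
// caches the V1_2 view, returning std::nullopt when no slice exists:
//
//   if (auto slice = metaModel.getSliceV1_2()) {
//       const V1_2::Model& slicedModel = slice->first;
//       uint32_t originalIndex = slice->second(/*slicedOperationIndex=*/0);
//   }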
diff --git a/nn/common/include/OperationResolver.h b/nn/common/include/OperationResolver.h
index ab70e4c3a..700513d13 100644
--- a/nn/common/include/OperationResolver.h
+++ b/nn/common/include/OperationResolver.h
@@ -25,7 +25,7 @@ namespace nn {
// Encapsulates an operation implementation.
struct OperationRegistration {
- hal::OperationType type;
+ OperationType type;
const char* name;
// Validates operand types, shapes, and any values known during graph creation.
@@ -47,7 +47,7 @@ struct OperationRegistration {
bool allowZeroSizedInput = false;
} flags;
- OperationRegistration(hal::OperationType type, const char* name,
+ OperationRegistration(OperationType type, const char* name,
std::function<bool(const IOperationValidationContext*)> validate,
std::function<bool(IOperationExecutionContext*)> prepare,
std::function<bool(IOperationExecutionContext*)> execute, Flag flags)
@@ -62,7 +62,7 @@ struct OperationRegistration {
// A registry of operation implementations.
class IOperationResolver {
public:
- virtual const OperationRegistration* findOperation(hal::OperationType operationType) const = 0;
+ virtual const OperationRegistration* findOperation(OperationType operationType) const = 0;
virtual ~IOperationResolver() {}
};
@@ -86,7 +86,7 @@ class BuiltinOperationResolver : public IOperationResolver {
return &instance;
}
- const OperationRegistration* findOperation(hal::OperationType operationType) const override;
+ const OperationRegistration* findOperation(OperationType operationType) const override;
private:
BuiltinOperationResolver();
@@ -116,11 +116,11 @@ class BuiltinOperationResolver : public IOperationResolver {
// .allowZeroSizedInput = true);
//
#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
-#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \
- const OperationRegistration* register_##identifier() { \
- static OperationRegistration registration(hal::OperationType::identifier, operationName, \
- validate, prepare, execute, {__VA_ARGS__}); \
- return &registration; \
+#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \
+ const OperationRegistration* register_##identifier() { \
+ static OperationRegistration registration(OperationType::identifier, operationName, \
+ validate, prepare, execute, {__VA_ARGS__}); \
+ return &registration; \
}
#else
// This version ignores CPU execution logic (prepare and execute).
@@ -129,7 +129,7 @@ class BuiltinOperationResolver : public IOperationResolver {
#define NN_REGISTER_OPERATION(identifier, operationName, validate, unused_prepare, unused_execute, \
...) \
const OperationRegistration* register_##identifier() { \
- static OperationRegistration registration(hal::OperationType::identifier, operationName, \
+ static OperationRegistration registration(OperationType::identifier, operationName, \
validate, nullptr, nullptr, {__VA_ARGS__}); \
return &registration; \
}
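// Sketch of a registration site under the updated macro, mirroring the usage
// documented above (the add:: functions are illustrative stand-ins for an
// operation's actual implementation file):
//
//   NN_REGISTER_OPERATION(ADD, "ADD", add::validate, add::prepare, add::execute,
//                         .allowZeroSizedInput = true);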
diff --git a/nn/common/include/OperationsUtils.h b/nn/common/include/OperationsUtils.h
index a8a07db9c..9b0a9bdaa 100644
--- a/nn/common/include/OperationsUtils.h
+++ b/nn/common/include/OperationsUtils.h
@@ -23,6 +23,7 @@
#include "HalInterfaces.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -45,11 +46,11 @@ enum PaddingScheme {
// Stores operand type information. "Shape" is a historical name.
struct Shape {
- hal::OperandType type = hal::OperandType::FLOAT32;
+ OperandType type = OperandType::FLOAT32;
std::vector<uint32_t> dimensions;
float scale = 0.0f;
int32_t offset = 0;
- hal::OperandExtraParams extraParams;
+ Operand::ExtraParams extraParams;
};
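// Sketch (illustrative values): a Shape for a quantized tensor might be
// populated like this, leaving extraParams at its default:
//
//   Shape shape = {.type = OperandType::TENSOR_QUANT8_ASYMM,
//                  .dimensions = {1, 224, 224, 3},
//                  .scale = 0.5f,
//                  .offset = 128};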
// Provides information available during graph creation to validate an operation.
@@ -76,12 +77,12 @@ class IOperationValidationContext {
virtual HalVersion getHalVersion() const = 0;
virtual uint32_t getNumInputs() const = 0;
- virtual hal::OperandType getInputType(uint32_t index) const = 0;
+ virtual OperandType getInputType(uint32_t index) const = 0;
virtual Shape getInputShape(uint32_t index) const = 0;
- virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0;
+ virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0;
virtual uint32_t getNumOutputs() const = 0;
- virtual hal::OperandType getOutputType(uint32_t index) const = 0;
+ virtual OperandType getOutputType(uint32_t index) const = 0;
virtual Shape getOutputShape(uint32_t index) const = 0;
};
@@ -91,13 +92,13 @@ class IOperationExecutionContext {
virtual ~IOperationExecutionContext() {}
virtual uint32_t getNumInputs() const = 0;
- virtual hal::OperandType getInputType(uint32_t index) const = 0;
+ virtual OperandType getInputType(uint32_t index) const = 0;
virtual Shape getInputShape(uint32_t index) const = 0;
virtual const void* getInputBuffer(uint32_t index) const = 0;
- virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0;
+ virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0;
virtual uint32_t getNumOutputs() const = 0;
- virtual hal::OperandType getOutputType(uint32_t index) const = 0;
+ virtual OperandType getOutputType(uint32_t index) const = 0;
virtual Shape getOutputShape(uint32_t index) const = 0;
virtual void* getOutputBuffer(uint32_t index) = 0;
@@ -125,11 +126,11 @@ class IOperationExecutionContext {
// Verifies that the number and types of operation inputs are as expected.
bool validateInputTypes(const IOperationValidationContext* context,
- const std::vector<hal::OperandType>& expectedTypes);
+ const std::vector<OperandType>& expectedTypes);
// Verifies that the number and types of operation outputs are as expected.
bool validateOutputTypes(const IOperationValidationContext* context,
- const std::vector<hal::OperandType>& expectedTypes);
+ const std::vector<OperandType>& expectedTypes);
// Verifies that the HAL version specified in the context is greater or equal
// than the minimal supported HAL version.
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index a2919091a..1d4c6811c 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -28,6 +28,8 @@
#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ValidateHal.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -135,24 +137,36 @@ void initVLogMask();
#define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=)
#define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >)
+// Make a TimeoutDuration from a duration in nanoseconds. If the value exceeds
+// the max duration, return the maximum expressible duration.
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds);
+
// Type to represent a deadline time point across processes.
using Deadline = std::chrono::steady_clock::time_point;
// Make a Deadline from a duration. If the sum of the current time and the
// duration exceeds the max time, return a time point holding the maximum
// expressible time.
-Deadline makeDeadline(uint64_t duration);
+Deadline makeDeadline(TimeoutDuration duration);
+inline Deadline makeDeadline(uint64_t duration) {
+ return makeDeadline(makeTimeoutDuration(duration));
+}
// Convenience function. If the duration is provided, this function creates a
// Deadline using makeDeadline. If the duration is not provided, this function
// returns std::nullopt.
-std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration);
+inline std::optional<Deadline> makeDeadline(OptionalTimeoutDuration duration) {
+ return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
+ return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
// Make an optional Deadline from an OptionalTimePoint. If
// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
// time point holding the maximum Deadline. If the OptionalTimePoint is none,
// this function returns std::nullopt.
-std::optional<Deadline> makeDeadline(const hal::OptionalTimePoint& timePoint);
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint);
// Returns true if the deadline has passed. Returns false if either the deadline
// has not been exceeded or if the deadline is not present.
@@ -160,7 +174,7 @@ bool hasDeadlinePassed(const std::optional<Deadline>& deadline);
// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
// provided, this function returns none for OptionalTimePoint.
-hal::OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
+OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
// Ensure that every user of FalseyErrorStream is linked to the
// correct instance, using the correct LOG_TAG
@@ -193,14 +207,14 @@ struct VersionedType {};
template <>
struct VersionedType<HalVersion::V1_2> {
- using OperandPerformance = hal::V1_2::Capabilities::OperandPerformance;
- using OperandType = hal::V1_2::OperandType;
+ using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+ using OperandType = V1_2::OperandType;
};
template <>
struct VersionedType<HalVersion::V1_3> {
- using OperandPerformance = hal::V1_3::Capabilities::OperandPerformance;
- using OperandType = hal::V1_3::OperandType;
+ using OperandPerformance = V1_3::Capabilities::OperandPerformance;
+ using OperandType = V1_3::OperandType;
};
template <HalVersion version>
@@ -218,32 +232,32 @@ using VersionedOperandType = typename VersionedType<version>::OperandType;
// separately using Capabilities::ifPerformance and
// Capabilities::whilePerformance.
template <HalVersion version>
-hal::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
- hal::PerformanceInfo perf);
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ V1_0::PerformanceInfo perf);
// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value. The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
-void update(hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>* operandPerformance,
- hal::V1_2::OperandType type, hal::PerformanceInfo perf);
-void update(hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>* operandPerformance,
- hal::V1_3::OperandType type, hal::PerformanceInfo perf);
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, V1_0::PerformanceInfo perf);
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, V1_0::PerformanceInfo perf);
// Look for a vector entry corresponding to the specified OperandType. If
// found, return the associated PerformanceInfo. If not, return a pessimistic
// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
-hal::PerformanceInfo lookup(
- const hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>& operandPerformance,
- hal::V1_2::OperandType type);
-hal::PerformanceInfo lookup(
- const hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>& operandPerformance,
- hal::V1_3::OperandType type);
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type);
+V1_0::PerformanceInfo lookup(
+ const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type);
// Returns true if an operand type is an extension type.
-bool isExtensionOperandType(hal::OperandType type);
+bool isExtensionOperandType(V1_3::OperandType type);
// Returns true if an operation type is an extension type.
-bool isExtensionOperationType(hal::OperationType type);
+bool isExtensionOperationType(V1_3::OperationType type);
// Returns the amount of space needed to store a value of the specified
// dimensions and type. For a tensor with unspecified rank or at least one
@@ -253,8 +267,9 @@ bool isExtensionOperationType(hal::OperationType type);
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
-uint32_t nonExtensionOperandSizeOfData(hal::OperandType type,
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
const std::vector<uint32_t>& dimensions);
+uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
@@ -264,7 +279,10 @@ uint32_t nonExtensionOperandSizeOfData(hal::OperandType type,
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(const Operand&).
-inline uint32_t nonExtensionOperandSizeOfData(const hal::Operand& operand) {
+inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
+ return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
+}
+inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) {
return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
}
@@ -283,7 +301,9 @@ uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& d
// Aborts if the specified type is an extension type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
+ const std::vector<uint32_t>& dimensions);
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
const std::vector<uint32_t>& dimensions);
// Returns true if the amount of space needed to store a value of the specified
@@ -300,17 +320,21 @@ bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uin
bool nonExtensionOperandTypeIsScalar(int type);
// Returns the name of the operation type in ASCII.
-std::string getOperationName(hal::OperationType opCode);
+std::string getOperationName(V1_3::OperationType opCode);
// Returns the name of the operand type in ASCII.
-std::string getOperandTypeName(hal::OperandType type);
+std::string getOperandTypeName(V1_3::OperandType type);
// Whether an operand of tensor type has unspecified dimensions.
//
// Undefined behavior if the operand type is a scalar type.
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
-bool tensorHasUnspecifiedDimensions(hal::OperandType type, const std::vector<uint32_t>& dimensions);
-bool tensorHasUnspecifiedDimensions(const hal::Operand& operand);
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+ const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
+bool tensorHasUnspecifiedDimensions(const Operand& operand);
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand);
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
// Returns the number of padding bytes needed to align data of the
@@ -323,10 +347,11 @@ bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
uint32_t alignBytesNeeded(uint32_t index, size_t length);
// Does a detailed LOG(INFO) of the model
-void logModelToInfo(const hal::V1_0::Model& model);
-void logModelToInfo(const hal::V1_1::Model& model);
-void logModelToInfo(const hal::V1_2::Model& model);
-void logModelToInfo(const hal::V1_3::Model& model);
+void logModelToInfo(const V1_0::Model& model);
+void logModelToInfo(const V1_1::Model& model);
+void logModelToInfo(const V1_2::Model& model);
+void logModelToInfo(const V1_3::Model& model);
+void logModelToInfo(const Model& model);
inline std::string toString(uint32_t obj) {
return std::to_string(obj);
@@ -344,22 +369,22 @@ std::string toString(const std::vector<Type>& range) {
template <typename A, typename B>
std::string toString(const std::pair<A, B>& pair) {
std::ostringstream oss;
- oss << "(" << toString(pair.first) << ", " << toString(pair.second) << ")";
+ oss << "(" << pair.first << ", " << pair.second << ")";
return oss.str();
}
-inline std::string toString(HalVersion halVersion) {
+inline std::ostream& operator<<(std::ostream& os, const HalVersion& halVersion) {
switch (halVersion) {
case HalVersion::UNKNOWN:
- return "UNKNOWN HAL version";
+ return os << "UNKNOWN HAL version";
case HalVersion::V1_0:
- return "HAL version 1.0";
+ return os << "HAL version 1.0";
case HalVersion::V1_1:
- return "HAL version 1.1";
+ return os << "HAL version 1.1";
case HalVersion::V1_2:
- return "HAL version 1.2";
+ return os << "HAL version 1.2";
case HalVersion::V1_3:
- return "HAL version 1.3";
+ return os << "HAL version 1.3";
}
}
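// Sketch (illustrative): the streaming operator replaces the old toString()
// overload, so a HalVersion can be logged directly:
//
//   LOG(ERROR) << "Driver requires at least " << HalVersion::V1_2;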
@@ -368,7 +393,7 @@ inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code)
}
bool validateOperandSymmPerChannelQuantParams(
- const hal::Operand& halOperand,
+ const V1_3::Operand& halOperand,
const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
// Validates an operand type.
@@ -376,25 +401,24 @@ bool validateOperandSymmPerChannelQuantParams(
// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
//
// If allowPartial is true, the dimensions may be underspecified.
-int validateOperandType(
- const ANeuralNetworksOperandType& type,
- const hal::Extension::OperandTypeInformation* const extensionOperandTypeInfo,
- const char* tag, bool allowPartial);
+int validateOperandType(const ANeuralNetworksOperandType& type,
+ const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
+ const char* tag, bool allowPartial);
int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
const char* tag);
// A set of functions to help validate models containing IF or WHILE operations.
struct SubgraphValidationHelper {
// Checks if a given operand is a SUBGRAPH operand with a valid offset.
- std::function<bool(const hal::Operand&)> isValidSubgraphReference;
+ std::function<bool(const Operand&)> isValidSubgraphReference;
// Gets the input count of a subgraph referenced by a given operand.
- std::function<uint32_t(const hal::Operand&)> getSubgraphInputCount;
+ std::function<uint32_t(const Operand&)> getSubgraphInputCount;
// Gets the output count of a subgraph referenced by a given operand.
- std::function<uint32_t(const hal::Operand&)> getSubgraphOutputCount;
+ std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
// Gets the specified input operand of a subgraph referenced by a given operand.
- std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphInputOperand;
+ std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
// Gets the specified output operand of a subgraph referenced by a given operand.
- std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphOutputOperand;
+ std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
// Whether control flow operations with inner or outer input or output
// operands of unknown size are allowed.
bool allowControlFlowOperationWithOperandOfUnknownSize;
@@ -405,7 +429,7 @@ struct SubgraphValidationHelper {
// The last argument is only used for validating IF and WHILE operations.
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
const uint32_t* inputIndexes, uint32_t outputCount,
- const uint32_t* outputIndexes, const std::vector<hal::Operand>& operands,
+ const uint32_t* outputIndexes, const std::vector<Operand>& operands,
HalVersion halVersion, const SubgraphValidationHelper& helper);
inline size_t getSizeFromInts(int lower, int higher) {
@@ -414,40 +438,41 @@ inline size_t getSizeFromInts(int lower, int higher) {
// Convert ANEURALNETWORKS_* result code to ErrorStatus.
// Not guaranteed to be a 1-to-1 mapping.
-hal::ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode);
// Convert ErrorStatus to ANEURALNETWORKS_* result code.
// Not guaranteed to be a 1-to-1 mapping.
-int convertErrorStatusToResultCode(hal::ErrorStatus status);
+int convertErrorStatusToResultCode(ErrorStatus status);
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
// Convert execution results to runtime format. Additionally checks that the
// returned results abide by the HAL specification, and logs an error if the
// result violates the specification.
-std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> getExecutionResult(
- hal::ErrorStatus status, std::vector<hal::OutputShape> outputShapes, hal::Timing timing);
-
-// Combine two tensor dimensions, both may have unspecified dimensions or rank.
-std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
- const std::vector<uint32_t>& rhs);
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing);
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+ ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
// Versioning
-bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_0(const hal::V1_3::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_1(const hal::V1_3::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_2(const hal::V1_3::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_0::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_1::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_2::Capabilities& capabilities);
-bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_3::Capabilities& capabilities);
// If noncompliantOperations != nullptr, then
// precondition: noncompliantOperations->empty()
@@ -455,114 +480,127 @@ bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities);
// operations; if the compliance check fails for some reason
// other than a noncompliant operation,
// *noncompliantOperations consists of the indices of all operations
-bool compliantWithV1_0(const hal::V1_0::Model& model);
-bool compliantWithV1_0(const hal::V1_1::Model& model);
-bool compliantWithV1_0(const hal::V1_2::Model& model,
+bool compliantWithV1_0(const V1_0::Model& model);
+bool compliantWithV1_0(const V1_1::Model& model);
+bool compliantWithV1_0(const V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_0(const hal::V1_3::Model& model,
+bool compliantWithV1_0(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const hal::V1_0::Model& model);
-bool compliantWithV1_1(const hal::V1_1::Model& model);
-bool compliantWithV1_1(const hal::V1_2::Model& model,
+bool compliantWithV1_1(const V1_0::Model& model);
+bool compliantWithV1_1(const V1_1::Model& model);
+bool compliantWithV1_1(const V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_1(const hal::V1_3::Model& model,
+bool compliantWithV1_1(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_2(const hal::V1_0::Model& model);
-bool compliantWithV1_2(const hal::V1_1::Model& model);
-bool compliantWithV1_2(const hal::V1_2::Model& model,
+bool compliantWithV1_2(const V1_0::Model& model);
+bool compliantWithV1_2(const V1_1::Model& model);
+bool compliantWithV1_2(const V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-bool compliantWithV1_2(const hal::V1_3::Model& model,
+bool compliantWithV1_2(const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
-hal::V1_0::ErrorStatus convertToV1_0(hal::V1_0::ErrorStatus status);
-hal::V1_0::ErrorStatus convertToV1_0(hal::V1_3::ErrorStatus status);
-hal::V1_3::ErrorStatus convertToV1_3(hal::V1_0::ErrorStatus status);
-hal::V1_3::ErrorStatus convertToV1_3(hal::V1_3::ErrorStatus status);
-
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
-hal::V1_0::Capabilities convertToV1_0(const hal::V1_3::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities);
-hal::V1_1::Capabilities convertToV1_1(const hal::V1_3::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_1::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities);
-hal::V1_2::Capabilities convertToV1_2(const hal::V1_3::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_0::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_1::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_2::Capabilities& capabilities);
-hal::V1_3::Capabilities convertToV1_3(const hal::V1_3::Capabilities& capabilities);
-
-hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model);
-hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model);
-hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model);
-hal::V1_0::Model convertToV1_0(const hal::V1_3::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model);
-hal::V1_1::Model convertToV1_1(const hal::V1_3::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model);
-hal::V1_2::Model convertToV1_2(const hal::V1_3::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_0::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_1::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_2::Model& model);
-hal::V1_3::Model convertToV1_3(const hal::V1_3::Model& model);
-
-hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_3::OperationType type);
-hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_3::OperationType type);
-hal::V1_2::OperationType uncheckedConvertToV1_2(hal::V1_3::OperationType type);
-
-hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand);
-hal::V1_0::Operand convertToV1_0(const hal::V1_3::Operand& operand);
-hal::V1_2::Operand convertToV1_2(const hal::V1_0::Operand& operand);
-hal::V1_2::Operand convertToV1_2(const hal::V1_3::Operand& operand);
-hal::V1_3::Operand convertToV1_3(const hal::V1_0::Operand& operand);
-hal::V1_3::Operand convertToV1_3(const hal::V1_2::Operand& operand);
-hal::V1_3::Operand convertToV1_3(const hal::V1_3::Operand& operand);
-
-hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_0::Operand>& operands);
-hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_2::Operand>& operands);
-hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_3::Operand>& operands);
-hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands);
-hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands);
-hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_3::Operand>& operands);
-hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_0::Operand>& operands);
-hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_2::Operand>& operands);
-hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_3::Operand>& operands);
-
-bool compliantWithV1_0(const hal::V1_0::Request& request);
-bool compliantWithV1_0(const hal::V1_3::Request& request);
-bool compliantWithV1_2(const hal::V1_3::Request& request);
-
-hal::V1_0::Request convertToV1_0(const hal::V1_0::Request& request);
-hal::V1_0::Request convertToV1_0(const hal::V1_3::Request& request);
-hal::V1_0::Request convertToV1_2(const hal::V1_3::Request& request);
-hal::V1_3::Request convertToV1_3(const hal::V1_0::Request& request);
-hal::V1_3::Request convertToV1_3(const hal::V1_3::Request& request);
-
-bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime);
-bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime);
-bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime);
-bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime);
-
-hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_0::OperandLifeTime lifetime);
-hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_3::OperandLifeTime lifetime);
-hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_0::OperandLifeTime lifetime);
-hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_3::OperandLifeTime lifetime);
-
-constexpr hal::Priority convertToHalPriority(int32_t priority) {
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status);
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status);
+
+V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities);
+
+V1_0::Model convertToV1_0(const V1_0::Model& model);
+V1_0::Model convertToV1_0(const V1_1::Model& model);
+V1_0::Model convertToV1_0(const V1_2::Model& model);
+V1_0::Model convertToV1_0(const V1_3::Model& model);
+V1_1::Model convertToV1_1(const V1_0::Model& model);
+V1_1::Model convertToV1_1(const V1_1::Model& model);
+V1_1::Model convertToV1_1(const V1_2::Model& model);
+V1_1::Model convertToV1_1(const V1_3::Model& model);
+V1_2::Model convertToV1_2(const V1_0::Model& model);
+V1_2::Model convertToV1_2(const V1_1::Model& model);
+V1_2::Model convertToV1_2(const V1_2::Model& model);
+V1_2::Model convertToV1_2(const V1_3::Model& model);
+V1_3::Model convertToV1_3(const V1_0::Model& model);
+V1_3::Model convertToV1_3(const V1_1::Model& model);
+V1_3::Model convertToV1_3(const V1_2::Model& model);
+V1_3::Model convertToV1_3(const V1_3::Model& model);
+
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type);
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type);
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type);
+
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand);
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands);
+
+bool compliantWithV1_0(const V1_0::Request& request);
+bool compliantWithV1_0(const V1_3::Request& request);
+bool compliantWithV1_2(const V1_3::Request& request);
+
+V1_0::Request convertToV1_0(const V1_0::Request& request);
+V1_0::Request convertToV1_0(const V1_3::Request& request);
+V1_0::Request convertToV1_2(const V1_3::Request& request);
+V1_3::Request convertToV1_3(const V1_0::Request& request);
+V1_3::Request convertToV1_3(const V1_3::Request& request);
+
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime);
+
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime);
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime);
+
+constexpr V1_3::Priority convertToHalPriority(int32_t priority) {
+ switch (priority) {
+ case ANEURALNETWORKS_PRIORITY_LOW:
+ return V1_3::Priority::LOW;
+ case ANEURALNETWORKS_PRIORITY_MEDIUM:
+ return V1_3::Priority::MEDIUM;
+ case ANEURALNETWORKS_PRIORITY_HIGH:
+ return V1_3::Priority::HIGH;
+ }
+ LOG(FATAL) << "unrecognized priority: " << priority;
+ return {};
+}
+
+constexpr Priority convertToCanonicalPriority(int32_t priority) {
switch (priority) {
case ANEURALNETWORKS_PRIORITY_LOW:
- return hal::Priority::LOW;
+ return Priority::LOW;
case ANEURALNETWORKS_PRIORITY_MEDIUM:
- return hal::Priority::MEDIUM;
+ return Priority::MEDIUM;
case ANEURALNETWORKS_PRIORITY_HIGH:
- return hal::Priority::HIGH;
+ return Priority::HIGH;
}
LOG(FATAL) << "unrecognized priority: " << priority;
return {};
@@ -583,6 +621,76 @@ FenceState syncWait(int fd, int timeout);
uint32_t getProp(const char* str, uint32_t defaultValue = 0);
#endif // NN_DEBUGGABLE
+// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h.
+Capabilities::OperandPerformance uncheckedConvert(
+ const V1_3::Capabilities::OperandPerformance& operandPerformance);
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo);
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities);
+DataLocation uncheckedConvert(const V1_0::DataLocation& location);
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status);
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status);
+Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&);
+Extension uncheckedConvert(const V1_2::Extension& extension);
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params);
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure);
+Memory uncheckedConvert(const hardware::hidl_memory& memory);
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&);
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph);
+Model uncheckedConvert(const V1_3::Model& model);
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params);
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params);
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime);
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params);
+OperandType uncheckedConvert(V1_3::OperandType operandType);
+Operand uncheckedConvert(const V1_3::Operand& operand);
+OperationType uncheckedConvert(V1_3::OperationType operationType);
+Operation uncheckedConvert(const V1_3::Operation& operation);
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration);
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape);
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument);
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool);
+Request uncheckedConvert(const V1_3::Request& request);
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions);
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories);
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs);
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands);
+std::vector<OutputShape> uncheckedConvert(
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes);
+std::vector<Request::MemoryPool> uncheckedConvert(
+ const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools);
+Timing uncheckedConvert(const V1_2::Timing& timing);
+
+// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h.
+hardware::hidl_memory convertToV1_0(const Memory& memory);
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories);
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues);
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes);
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles);
+V1_0::DataLocation convertToV1_0(const DataLocation& location);
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status);
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument);
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference);
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure);
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&);
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params);
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape);
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params);
+V1_2::Timing convertToV1_2(const Timing& timing);
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole);
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status);
+V1_3::Model convertToV1_3(const Model& model);
+V1_3::Operand convertToV1_3(const Operand& operand);
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime);
+V1_3::OperandType convertToV1_3(OperandType operandType);
+V1_3::Operation convertToV1_3(const Operation& operation);
+V1_3::OperationType convertToV1_3(OperationType operationType);
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration);
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint);
+V1_3::Priority convertToV1_3(Priority priority);
+V1_3::Request convertToV1_3(const Request& request);
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool);
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
+
} // namespace nn
} // namespace android
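For illustration, a minimal sketch of how the helpers declared above might be called from runtime code; illustrative only, assuming Utils.h is included and the android::nn namespace is open:

    // Lower an NDK priority constant onto the HAL and build its canonical twin.
    V1_3::Priority halPriority = convertToHalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM);
    Priority canonicalPriority = convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM);

    // Round-trip an error status: canonical -> 1.3 HAL -> canonical.
    V1_3::ErrorStatus halStatus = convertToV1_3(ErrorStatus::NONE);
    ErrorStatus canonicalStatus = uncheckedConvert(halStatus);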
diff --git a/nn/common/include/ValidateHal.h b/nn/common/include/ValidateHal.h
index 32d7662ed..c501fc011 100644
--- a/nn/common/include/ValidateHal.h
+++ b/nn/common/include/ValidateHal.h
@@ -35,7 +35,7 @@ enum class HalVersion : int32_t {
};
enum class IOType { INPUT, OUTPUT };
-using PreparedModelRole = std::tuple<const hal::IPreparedModel*, IOType, uint32_t>;
+using PreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>;
// 1.3 HAL does not support control flow operations with operands of unknown size.
// See http://b/132458982#comment63.
@@ -62,35 +62,35 @@ bool validateRequest(const T_Request& request, const T_Model& model,
bool allowUnspecifiedOutput = true);
// Verifies that the execution preference is valid.
-bool validateExecutionPreference(hal::ExecutionPreference preference);
+bool validateExecutionPreference(V1_1::ExecutionPreference preference);
// Verifies that the priority is valid.
-bool validatePriority(hal::Priority priority);
+bool validatePriority(V1_3::Priority priority);
-bool validOperationType(hal::V1_0::OperationType operation);
-bool validOperationType(hal::V1_1::OperationType operation);
-bool validOperationType(hal::V1_2::OperationType operation);
+bool validOperationType(V1_0::OperationType operation);
+bool validOperationType(V1_1::OperationType operation);
+bool validOperationType(V1_2::OperationType operation);
-bool validOperandType(hal::V1_0::OperandType operand);
-bool validOperandType(hal::V1_2::OperandType operand);
-bool validOperandType(hal::V1_3::OperandType operand);
+bool validOperandType(V1_0::OperandType operand);
+bool validOperandType(V1_2::OperandType operand);
+bool validOperandType(V1_3::OperandType operand);
// Verifies that the memory pool is valid in the specified HAL version.
-bool validatePool(const hal::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
-bool validatePool(const hal::V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST);
+bool validatePool(const hardware::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
+bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST);
// Verifies that the input arguments to IDevice::allocate are valid.
// Optionally, this function can return the flattened prepared model roles and a combined operand.
// Pass nullptr if either value is not needed.
// IMPORTANT: This function cannot validate dimensions and extraParams with extension operand types.
// Each driver should do its own validation of extension type dimensions and extraParams.
-bool validateMemoryDesc(
- const hal::V1_3::BufferDesc& desc,
- const hal::hidl_vec<sp<hal::V1_3::IPreparedModel>>& preparedModels,
- const hal::hidl_vec<hal::V1_3::BufferRole>& inputRoles,
- const hal::hidl_vec<hal::V1_3::BufferRole>& outputRoles,
- std::function<const hal::V1_3::Model*(const sp<hal::V1_3::IPreparedModel>&)> getModel,
- std::set<PreparedModelRole>* preparedModelRoles, hal::V1_3::Operand* combinedOperand);
+bool validateMemoryDesc(const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles,
+ std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel,
+ std::set<PreparedModelRole>* preparedModelRoles,
+ V1_3::Operand* combinedOperand);
} // namespace nn
} // namespace android
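A brief sketch of the versioned validation entry points above (illustrative only; SUSTAINED_SPEED and MEDIUM are enumerators of the 1.1 and 1.3 HAL types):

    bool ok = validateExecutionPreference(V1_1::ExecutionPreference::SUSTAINED_SPEED) &&
              validatePriority(V1_3::Priority::MEDIUM);

For validateMemoryDesc, the comment above means a caller that only needs the yes/no answer may pass nullptr for both preparedModelRoles and combinedOperand.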
diff --git a/nn/common/operations/Activation.cpp b/nn/common/operations/Activation.cpp
index ff5a55dc3..c0a1934d7 100644
--- a/nn/common/operations/Activation.cpp
+++ b/nn/common/operations/Activation.cpp
@@ -28,7 +28,6 @@
#include "ActivationFunctor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -36,8 +35,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
namespace activation {
constexpr uint32_t kNumInputs = 1;
@@ -373,7 +370,7 @@ bool validate(OperationType opType, const IOperationValidationContext* context)
} else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
const Shape& input = context->getInputShape(kInputTensor);
if (hasKnownRank(input)) {
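This hunk shows a pattern repeated throughout the change: getOperationName()/toString() calls are dropped because the canonical types come with stream insertion operators. Illustrative only:

    // Before: ... << getOperationName(opType);  after: the enum streams directly.
    LOG(ERROR) << "Unsupported tensor type for operation " << OperationType::RELU;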
diff --git a/nn/common/operations/ArgMinMax.cpp b/nn/common/operations/ArgMinMax.cpp
index f53ba47e4..2ee413c7e 100644
--- a/nn/common/operations/ArgMinMax.cpp
+++ b/nn/common/operations/ArgMinMax.cpp
@@ -19,7 +19,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -27,8 +26,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
template <typename In, typename Out>
static void argMinMaxImpl(const In* inputData, const Shape& inputShape, int32_t axis, bool isArgMin,
Out* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/BidirectionalSequenceLSTM.cpp b/nn/common/operations/BidirectionalSequenceLSTM.cpp
index 12ac43f20..6cf095b1e 100644
--- a/nn/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/nn/common/operations/BidirectionalSequenceLSTM.cpp
@@ -23,7 +23,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -32,8 +31,6 @@ namespace nn {
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/BidirectionalSequenceLSTM.h b/nn/common/operations/BidirectionalSequenceLSTM.h
index 184b65da0..7077d3b10 100644
--- a/nn/common/operations/BidirectionalSequenceLSTM.h
+++ b/nn/common/operations/BidirectionalSequenceLSTM.h
@@ -34,12 +34,11 @@ struct RunTimeOperandInfo;
class BidirectionalSequenceLSTM {
public:
- BidirectionalSequenceLSTM(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ BidirectionalSequenceLSTM(const Operation& operation, RunTimeOperandInfo* operands);
- bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
- Shape* fwOutputShape, Shape* bwOutputShape, Shape* fwOutputActivationState,
- Shape* fwOutputCellState, Shape* bwOutputActivationState,
- Shape* bwOutputCellState);
+ bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* fwOutputShape,
+ Shape* bwOutputShape, Shape* fwOutputActivationState, Shape* fwOutputCellState,
+ Shape* bwOutputActivationState, Shape* bwOutputCellState);
bool Eval();
// Input Tensors of size {max_time, n_batch, n_input}
diff --git a/nn/common/operations/BidirectionalSequenceRNN.cpp b/nn/common/operations/BidirectionalSequenceRNN.cpp
index 98917c0b8..adacea0c6 100644
--- a/nn/common/operations/BidirectionalSequenceRNN.cpp
+++ b/nn/common/operations/BidirectionalSequenceRNN.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
@@ -61,8 +60,6 @@ constexpr uint32_t kBwOutputHiddenStateTensor = 3;
namespace {
-using namespace hal;
-
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
@@ -327,7 +324,7 @@ bool validate(const IOperationValidationContext* context) {
OperandType inputType = context->getInputType(kInputTensor);
if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {
LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: "
- << toString(inputType);
+ << inputType;
return false;
}
NN_RET_CHECK(validateInputTypes(
diff --git a/nn/common/operations/Broadcast.cpp b/nn/common/operations/Broadcast.cpp
index 17094afa3..67bb914bd 100644
--- a/nn/common/operations/Broadcast.cpp
+++ b/nn/common/operations/Broadcast.cpp
@@ -29,16 +29,14 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace broadcast {
constexpr uint32_t kNumInputs = 3;
@@ -53,16 +51,16 @@ namespace {
#define ANDROID_NN_MACRO_DISPATCH(macro) \
switch (activation) { \
- case (int32_t)FusedActivationFunc::NONE: \
+ case static_cast<int32_t>(FusedActivationFunc::NONE): \
macro(kNone); \
break; \
- case (int32_t)FusedActivationFunc::RELU: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU): \
macro(kRelu); \
break; \
- case (int32_t)FusedActivationFunc::RELU1: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU1): \
macro(kRelu1); \
break; \
- case (int32_t)FusedActivationFunc::RELU6: \
+ case static_cast<int32_t>(FusedActivationFunc::RELU6): \
macro(kRelu6); \
break; \
default: \
@@ -464,7 +462,7 @@ bool validate(OperationType opType, const IOperationValidationContext* context)
inputType == OperandType::TENSOR_INT32) {
NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_3, opIntroducedAt)));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
const Shape& input1 = context->getInputShape(kInputTensor1);
const Shape& input2 = context->getInputShape(kInputTensor2);
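The macro rewrite above is behavior-preserving: each C-style cast becomes a static_cast, which makes the scoped-enum-to-integer conversion explicit. A one-line illustration:

    const int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU6);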
diff --git a/nn/common/operations/Cast.cpp b/nn/common/operations/Cast.cpp
index 77e35afb0..aef3baf79 100644
--- a/nn/common/operations/Cast.cpp
+++ b/nn/common/operations/Cast.cpp
@@ -20,7 +20,6 @@
#include <algorithm>
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -30,8 +29,6 @@ namespace cast {
namespace {
-using namespace hal;
-
template <typename FromT, typename ToT>
void copyCast(const FromT* in, ToT* out, int numElements) {
std::transform(in, in + numElements, out, [](FromT a) -> ToT {
diff --git a/nn/common/operations/ChannelShuffle.cpp b/nn/common/operations/ChannelShuffle.cpp
index 7abf224c8..779a8d811 100644
--- a/nn/common/operations/ChannelShuffle.cpp
+++ b/nn/common/operations/ChannelShuffle.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -25,8 +24,6 @@ namespace android {
namespace nn {
namespace channel_shuffle {
-using namespace hal;
-
constexpr char kOperationName[] = "CHANNEL_SHUFFLE";
constexpr uint32_t kNumInputs = 3;
diff --git a/nn/common/operations/Comparisons.cpp b/nn/common/operations/Comparisons.cpp
index a8f86228a..50ed806a0 100644
--- a/nn/common/operations/Comparisons.cpp
+++ b/nn/common/operations/Comparisons.cpp
@@ -19,7 +19,6 @@
#include <functional>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -37,8 +36,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename DataType, typename ComparisonType>
bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData,
const Shape& aShape, const DataType* bData, const Shape& bShape, bool8* outputData,
@@ -135,7 +132,7 @@ bool validate(const IOperationValidationContext* context) {
inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for comparison op: " << toString(inputType);
+ << "Unsupported input operand type for comparison op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType}));
NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8}));
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/nn/common/operations/Concatenation.cpp b/nn/common/operations/Concatenation.cpp
index 08c9c6130..6de5baded 100644
--- a/nn/common/operations/Concatenation.cpp
+++ b/nn/common/operations/Concatenation.cpp
@@ -27,7 +27,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool concatenation(const std::vector<const T*>& inputDataPtrs,
const std::vector<Shape>& inputShapes, int32_t axis, T* outputData,
diff --git a/nn/common/operations/Conv2D.cpp b/nn/common/operations/Conv2D.cpp
index f34e9080c..5b7d8d0de 100644
--- a/nn/common/operations/Conv2D.cpp
+++ b/nn/common/operations/Conv2D.cpp
@@ -26,7 +26,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "OperationsUtils.h"
@@ -49,8 +48,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
@@ -566,7 +563,9 @@ bool validate(const IOperationValidationContext* context) {
OperandType::INT32};
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
0)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -727,7 +726,9 @@ bool execute(IOperationExecutionContext* context) {
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
@@ -758,7 +759,9 @@ bool execute(IOperationExecutionContext* context) {
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
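The Conv2D hunks above replace the generated HIDL accessor channelQuant() with std::get, reflecting that the canonical Operand::ExtraParams is a std::variant. A minimal sketch, with made-up scale values:

    Operand::SymmPerChannelQuantParams channelQuant;
    channelQuant.scales = {0.5f, 0.25f};  // hypothetical per-channel scales
    channelQuant.channelDim = 0;
    Operand::ExtraParams extraParams = channelQuant;
    const float* scales =
            std::get<Operand::SymmPerChannelQuantParams>(extraParams).scales.data();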
diff --git a/nn/common/operations/DepthwiseConv2D.cpp b/nn/common/operations/DepthwiseConv2D.cpp
index 32e8b5594..47bf0104f 100644
--- a/nn/common/operations/DepthwiseConv2D.cpp
+++ b/nn/common/operations/DepthwiseConv2D.cpp
@@ -42,8 +42,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
struct DepthwiseConv2dParam {
int32_t padding_left, padding_right;
int32_t padding_top, padding_bottom;
@@ -443,7 +441,9 @@ bool validate(const IOperationValidationContext* context) {
filterType == inputType)
<< "Unsupported filter tensor type for operation " << kOperationName;
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
3)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -607,7 +607,9 @@ bool execute(IOperationExecutionContext* context) {
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
@@ -639,7 +641,9 @@ bool execute(IOperationExecutionContext* context) {
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param.padding_left,
param.padding_right, param.padding_top, param.padding_bottom,
diff --git a/nn/common/operations/Dequantize.cpp b/nn/common/operations/Dequantize.cpp
index 2fb2d5cb0..7b8114347 100644
--- a/nn/common/operations/Dequantize.cpp
+++ b/nn/common/operations/Dequantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -33,8 +32,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename InputType, typename OutputType>
bool compute(const InputType* inputData, const Shape& inputShape, OutputType* outputData) {
const int numElements = getNumberOfElements(inputShape);
@@ -52,7 +49,8 @@ bool computePerChannel(const int8_t* inputData, const Shape& inputShape, OutputT
// First we calculate a stride, which is the number of elements we need to
// skip to change an index along a dimension with different quantization
// scales.
- const int channelDim = inputShape.extraParams.channelQuant().channelDim;
+ const int channelDim =
+ std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams).channelDim;
int stride = 1;
for (int i = getNumberOfDimensions(inputShape) - 1; i > channelDim; --i) {
stride *= getSizeOfDimension(inputShape, i);
@@ -67,7 +65,8 @@ bool computePerChannel(const int8_t* inputData, const Shape& inputShape, OutputT
// size of the dimension (so that we don't have an overflow if the
// channelDim is not 0).
const int scaleIndex = (i / stride) % getSizeOfDimension(inputShape, channelDim);
- const float scale = inputShape.extraParams.channelQuant().scales[scaleIndex];
+ const float scale = std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams)
+ .scales[scaleIndex];
const int32_t value = inputData[i];
outputData[i] = static_cast<OutputType>(scale * (value - zeroPoint));
}
@@ -97,10 +96,10 @@ bool validate(const IOperationValidationContext* context) {
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
inputType == OperandType::TENSOR_QUANT8_SYMM ||
inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)
- << "Unsupported input operand type for DEQUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for DEQUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||
outputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported output operand type for DEQUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for DEQUANTIZE op: " << outputType;
return validateHalVersion(context, HalVersion::V1_2);
}
@@ -155,7 +154,7 @@ bool execute(IOperationExecutionContext* context) {
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for dequantize op. (input type: "
- << toString(inputType) << " output type: " << toString(outputType) << ")";
+ << inputType << " output type: " << outputType << ")";
}
} // namespace dequantize
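To make the stride arithmetic in computePerChannel() concrete, a worked example with an illustrative shape:

    // Shape {2, 3, 4} with channelDim = 1:
    const int stride = 4;    // product of the dimensions after channelDim
    const int channels = 3;  // getSizeOfDimension(inputShape, channelDim)
    // scaleIndex = (i / stride) % channels over flat indices i yields
    // 0,0,0,0, 1,1,1,1, 2,2,2,2, 0,0,0,0, ... so each element picks up the
    // scale of the channel it belongs to.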
diff --git a/nn/common/operations/Elementwise.cpp b/nn/common/operations/Elementwise.cpp
index 82a268763..3ddae9096 100644
--- a/nn/common/operations/Elementwise.cpp
+++ b/nn/common/operations/Elementwise.cpp
@@ -18,7 +18,6 @@
#include <cmath>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -35,8 +34,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename IntermediateType, typename T>
inline bool compute(IntermediateType func(IntermediateType), const T* input, const Shape& shape,
T* output) {
diff --git a/nn/common/operations/Elu.cpp b/nn/common/operations/Elu.cpp
index 07304e7d7..dfb221c63 100644
--- a/nn/common/operations/Elu.cpp
+++ b/nn/common/operations/Elu.cpp
@@ -20,7 +20,6 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -30,8 +29,6 @@ namespace android {
namespace nn {
namespace elu {
-using namespace hal;
-
constexpr uint32_t kNumInputs = 2;
constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kAlphaScalar = 1;
diff --git a/nn/common/operations/EmbeddingLookup.cpp b/nn/common/operations/EmbeddingLookup.cpp
index 12e4a65a9..5ff26e8e0 100644
--- a/nn/common/operations/EmbeddingLookup.cpp
+++ b/nn/common/operations/EmbeddingLookup.cpp
@@ -19,7 +19,6 @@
#include "EmbeddingLookup.h"
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -27,8 +26,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
EmbeddingLookup::EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands) {
value_ = GetInput(operation, operands, kValueTensor);
lookup_ = GetInput(operation, operands, kLookupTensor);
diff --git a/nn/common/operations/EmbeddingLookup.h b/nn/common/operations/EmbeddingLookup.h
index 9a82ddabf..0388b355a 100644
--- a/nn/common/operations/EmbeddingLookup.h
+++ b/nn/common/operations/EmbeddingLookup.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -28,7 +28,7 @@ struct RunTimeOperandInfo;
class EmbeddingLookup {
public:
- EmbeddingLookup(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands);
bool Eval();
diff --git a/nn/common/operations/Fill.cpp b/nn/common/operations/Fill.cpp
index a6b390698..a23362759 100644
--- a/nn/common/operations/Fill.cpp
+++ b/nn/common/operations/Fill.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
namespace android {
@@ -33,8 +32,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool executeTyped(IOperationExecutionContext* context) {
T* output = context->getOutputBuffer<T>(kOutputTensor);
@@ -58,7 +55,7 @@ bool getValueType(OperandType outputType, OperandType* valueType) {
*valueType = OperandType::INT32;
return true;
default:
- NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << toString(outputType);
+ NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << outputType;
}
}
@@ -73,7 +70,7 @@ bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||
outputType == OperandType::TENSOR_FLOAT32 ||
outputType == OperandType::TENSOR_INT32)
- << "Unsupported output type for fill op: " << toString(outputType);
+ << "Unsupported output type for fill op: " << outputType;
NN_RET_CHECK(validateOutputTypes(context, {outputType}));
OperandType valueType;
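getValueType() above maps the FILL output tensor type to the scalar type expected for the fill value; the hunk shows the INT32 arm and the failure arm. A sketch of its use, as in validate():

    OperandType valueType;
    NN_RET_CHECK(getValueType(OperandType::TENSOR_INT32, &valueType));
    // valueType is now OperandType::INT32.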
diff --git a/nn/common/operations/FullyConnected.cpp b/nn/common/operations/FullyConnected.cpp
index 9bdd0bab2..9fcc072ff 100644
--- a/nn/common/operations/FullyConnected.cpp
+++ b/nn/common/operations/FullyConnected.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -45,8 +44,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
// executionMutex is used to protect concurrent access to non-threadsafe resources
// like gemmlowp::GemmContext.
// std::mutex is safe for pthreads on Android.
diff --git a/nn/common/operations/Gather.cpp b/nn/common/operations/Gather.cpp
index d496d6ada..e73a22eb5 100644
--- a/nn/common/operations/Gather.cpp
+++ b/nn/common/operations/Gather.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis,
const int32_t* indicesData, const Shape& indicesShape, T* outputData) {
diff --git a/nn/common/operations/GenerateProposals.cpp b/nn/common/operations/GenerateProposals.cpp
index 4e3aa3fb0..2ef733e14 100644
--- a/nn/common/operations/GenerateProposals.cpp
+++ b/nn/common/operations/GenerateProposals.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -35,8 +34,6 @@ namespace bbox_ops {
namespace {
-using namespace hal;
-
struct BoxEncodingCorner {
float x1, y1, x2, y2;
};
diff --git a/nn/common/operations/HashtableLookup.cpp b/nn/common/operations/HashtableLookup.cpp
index 287c866d0..cfb9d9812 100644
--- a/nn/common/operations/HashtableLookup.cpp
+++ b/nn/common/operations/HashtableLookup.cpp
@@ -19,7 +19,6 @@
#include "HashtableLookup.h"
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Operations.h"
#include "Tracing.h"
@@ -29,8 +28,6 @@ namespace nn {
namespace {
-using namespace hal;
-
int greater(const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
}
diff --git a/nn/common/operations/HashtableLookup.h b/nn/common/operations/HashtableLookup.h
index c0921e0fd..1ae554f56 100644
--- a/nn/common/operations/HashtableLookup.h
+++ b/nn/common/operations/HashtableLookup.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -28,7 +28,7 @@ struct RunTimeOperandInfo;
class HashtableLookup {
public:
- HashtableLookup(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ HashtableLookup(const Operation& operation, RunTimeOperandInfo* operands);
bool Eval();
diff --git a/nn/common/operations/HeatmapMaxKeypoint.cpp b/nn/common/operations/HeatmapMaxKeypoint.cpp
index 3608ca59d..a07e1428c 100644
--- a/nn/common/operations/HeatmapMaxKeypoint.cpp
+++ b/nn/common/operations/HeatmapMaxKeypoint.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -44,8 +43,6 @@ constexpr uint32_t kOutputKeypointTensor = 1;
namespace {
-using namespace hal;
-
// This function uses Taylor expansion up to the quadratic term to approximate the
// bicubic upscaling result.
// 2nd order Taylor expansion: D(x) = D - b'x + 1/2 * x'Ax
diff --git a/nn/common/operations/InstanceNormalization.cpp b/nn/common/operations/InstanceNormalization.cpp
index 75b907b64..0ce21d03e 100644
--- a/nn/common/operations/InstanceNormalization.cpp
+++ b/nn/common/operations/InstanceNormalization.cpp
@@ -20,7 +20,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
inline bool instanceNormNhwc(const T* inputData, const Shape& inputShape, T gamma, T beta,
T epsilon, T* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/L2Normalization.cpp b/nn/common/operations/L2Normalization.cpp
index 1f0c9d051..f86ab8011 100644
--- a/nn/common/operations/L2Normalization.cpp
+++ b/nn/common/operations/L2Normalization.cpp
@@ -23,7 +23,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -42,8 +41,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis,
float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("l2normFloat32");
diff --git a/nn/common/operations/LSHProjection.cpp b/nn/common/operations/LSHProjection.cpp
index bdb106e18..14d7a790d 100644
--- a/nn/common/operations/LSHProjection.cpp
+++ b/nn/common/operations/LSHProjection.cpp
@@ -18,19 +18,18 @@
#include "LSHProjection.h"
+#include <utils/hash/farmhash.h>
+
+#include <memory>
+
#include "CpuExecutor.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
#include "Utils.h"
-
-#include <utils/hash/farmhash.h>
-#include <memory>
+#include "nnapi/Types.h"
namespace android {
namespace nn {
-using namespace hal;
-
LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* operands) {
input_ = GetInput(operation, operands, kInputTensor);
weight_ = GetInput(operation, operands, kWeightTensor);
@@ -112,7 +111,7 @@ int runningSignBit(const RunTimeOperandInfo* input, const RunTimeOperandInfo* we
int64_t hash_signature = farmhash::Fingerprint64(key.get(), key_bytes);
double running_value = static_cast<double>(hash_signature);
input_ptr += input_item_bytes;
- if (weight->lifetime == OperandLifeTime::NO_VALUE) {
+ if (weight->lifetime == Operand::LifeTime::NO_VALUE) {
score += running_value;
} else {
score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
diff --git a/nn/common/operations/LSHProjection.h b/nn/common/operations/LSHProjection.h
index 520f58a89..3a953a0ad 100644
--- a/nn/common/operations/LSHProjection.h
+++ b/nn/common/operations/LSHProjection.h
@@ -19,7 +19,7 @@
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -36,9 +36,9 @@ struct Shape;
class LSHProjection {
public:
- LSHProjection(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ LSHProjection(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape);
template <typename T>
bool Eval();
diff --git a/nn/common/operations/LSTM.cpp b/nn/common/operations/LSTM.cpp
index 3051cfd99..e64d0c495 100644
--- a/nn/common/operations/LSTM.cpp
+++ b/nn/common/operations/LSTM.cpp
@@ -22,18 +22,16 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationsUtils.h"
#include "Tracing.h"
#include "Utils.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
@@ -113,7 +111,7 @@ LSTMCell::LSTMCell(const Operation& operation, RunTimeOperandInfo* operands) {
} else {
// For LSTM from HAL v1.0 assign operands with no values
static RunTimeOperandInfo no_value;
- no_value.lifetime = OperandLifeTime::NO_VALUE;
+ no_value.lifetime = Operand::LifeTime::NO_VALUE;
input_layer_norm_weights_ = &no_value;
forget_layer_norm_weights_ = &no_value;
@@ -221,8 +219,8 @@ bool LSTMCell::CheckInputTensorDimensions(
// omitted ones can be omitted in case CIFG LSTM is used.
params->use_layer_norm = !IsNullInput(output_layer_norm_weights);
- params->use_projection_weight = (projection_weights->lifetime != OperandLifeTime::NO_VALUE);
- params->use_projection_bias = (projection_bias->lifetime != OperandLifeTime::NO_VALUE);
+ params->use_projection_weight = (projection_weights->lifetime != Operand::LifeTime::NO_VALUE);
+ params->use_projection_bias = (projection_bias->lifetime != Operand::LifeTime::NO_VALUE);
// Make sure the input gate bias is present only when not a CIFG-LSTM.
if (params->use_cifg) {
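The LSTM hunks keep the long-standing sentinel pattern for omitted optional operands, only spelling the enum canonically. A sketch of the pattern:

    static RunTimeOperandInfo no_value;
    no_value.lifetime = Operand::LifeTime::NO_VALUE;
    // Optionality is then queried by comparing lifetimes:
    const bool hasProjection =
            (projection_weights->lifetime != Operand::LifeTime::NO_VALUE);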
diff --git a/nn/common/operations/LSTM.h b/nn/common/operations/LSTM.h
index b48c3df3c..dc6a43cfa 100644
--- a/nn/common/operations/LSTM.h
+++ b/nn/common/operations/LSTM.h
@@ -24,7 +24,7 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -48,9 +48,9 @@ struct Shape;
class LSTMCell {
public:
- LSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ LSTMCell(const Operation& operation, RunTimeOperandInfo* operands);
- bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape,
+ bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape,
Shape* outputStateShape, Shape* cellStateShape, Shape* outputShape);
bool Eval();
diff --git a/nn/common/operations/LocalResponseNormalization.cpp b/nn/common/operations/LocalResponseNormalization.cpp
index 40220e1b0..26a7a002a 100644
--- a/nn/common/operations/LocalResponseNormalization.cpp
+++ b/nn/common/operations/LocalResponseNormalization.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -45,8 +44,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
inline bool localResponseNormFloat32Impl(const float* inputData, const Shape& inputShape,
int32_t radius, float bias, float alpha, float beta,
int32_t axis, float* outputData,
diff --git a/nn/common/operations/LogSoftmax.cpp b/nn/common/operations/LogSoftmax.cpp
index 4132ef9ae..fdcccf851 100644
--- a/nn/common/operations/LogSoftmax.cpp
+++ b/nn/common/operations/LogSoftmax.cpp
@@ -16,19 +16,18 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
+#include <algorithm>
+#include <cmath>
+#include <vector>
+
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
-#include <cmath>
-
namespace android {
namespace nn {
namespace log_softmax {
-using namespace hal;
-
constexpr char kOperationName[] = "LOG_SOFTMAX";
constexpr uint32_t kNumInputs = 3;
diff --git a/nn/common/operations/LogicalAndOr.cpp b/nn/common/operations/LogicalAndOr.cpp
index 6ada724e0..9d7e5ce19 100644
--- a/nn/common/operations/LogicalAndOr.cpp
+++ b/nn/common/operations/LogicalAndOr.cpp
@@ -16,7 +16,9 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
+#include <functional>
+#include <vector>
+
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -34,8 +36,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
bool compute(const std::function<bool(bool, bool)>& func, const bool8* aData, const Shape& aShape,
const bool8* bData, const Shape& bShape, bool8* outputData, const Shape& outputShape) {
IndexedShapeWrapper aShapeIndexed(aShape);
diff --git a/nn/common/operations/LogicalNot.cpp b/nn/common/operations/LogicalNot.cpp
index 8b418135e..c71538854 100644
--- a/nn/common/operations/LogicalNot.cpp
+++ b/nn/common/operations/LogicalNot.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -32,8 +31,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
bool compute(const bool8* input, const Shape& shape, bool8* output) {
const auto size = getNumberOfElements(shape);
for (uint32_t i = 0; i < size; ++i) {
diff --git a/nn/common/operations/MaximumMinimum.cpp b/nn/common/operations/MaximumMinimum.cpp
index 91a4bb021..339172fd2 100644
--- a/nn/common/operations/MaximumMinimum.cpp
+++ b/nn/common/operations/MaximumMinimum.cpp
@@ -20,7 +20,6 @@
#include <vector>
#include "MaximumMinimum.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -31,8 +30,6 @@ namespace maximum_minimum {
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* aData, const Shape& aShape, const T* bData, const Shape& bShape,
bool isMinimum, T* outputData, const Shape& outputShape) {
@@ -124,7 +121,7 @@ bool eval(const void* in1, const Shape& shape1, const void* in2, const Shape& sh
reinterpret_cast<int8_t*>(output), outputShape);
}
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(shape1.type);
+ LOG(ERROR) << "Unsupported data type: " << shape1.type;
return false;
}
}
diff --git a/nn/common/operations/Multinomial.cpp b/nn/common/operations/Multinomial.cpp
index 7e1d2c6f8..80fb7e880 100644
--- a/nn/common/operations/Multinomial.cpp
+++ b/nn/common/operations/Multinomial.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
#include "guarded_philox_random.h"
@@ -37,8 +36,6 @@ namespace nn {
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/Multinomial.h b/nn/common/operations/Multinomial.h
index 0f5434e9b..bdfe58757 100644
--- a/nn/common/operations/Multinomial.h
+++ b/nn/common/operations/Multinomial.h
@@ -23,7 +23,7 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -33,9 +33,9 @@ struct Shape;
class Multinomial {
public:
- Multinomial(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ Multinomial(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape);
bool Eval();
diff --git a/nn/common/operations/MultinomialTest.cpp b/nn/common/operations/MultinomialTest.cpp
index e34de63dc..668ed36b3 100644
--- a/nn/common/operations/MultinomialTest.cpp
+++ b/nn/common/operations/MultinomialTest.cpp
@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#include "Multinomial.h"
+#include <gmock/gmock-matchers.h>
+#include <gtest/gtest.h>
+
+#include <unsupported/Eigen/CXX11/Tensor>
+#include <vector>
-#include "HalInterfaces.h"
+#include "Multinomial.h"
#include "NeuralNetworksWrapper.h"
#include "philox_random.h"
#include "simple_philox.h"
-#include <gmock/gmock-matchers.h>
-#include <gtest/gtest.h>
-#include <unsupported/Eigen/CXX11/Tensor>
-
namespace android {
namespace nn {
namespace wrapper {
diff --git a/nn/common/operations/Neg.cpp b/nn/common/operations/Neg.cpp
index 48d962c9a..bf2172704 100644
--- a/nn/common/operations/Neg.cpp
+++ b/nn/common/operations/Neg.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
inline bool compute(const T* input, const Shape& shape, T* output) {
const auto size = getNumberOfElements(shape);
diff --git a/nn/common/operations/PRelu.cpp b/nn/common/operations/PRelu.cpp
index a799a84cb..7e0c8c371 100644
--- a/nn/common/operations/PRelu.cpp
+++ b/nn/common/operations/PRelu.cpp
@@ -19,7 +19,6 @@
#include <algorithm>
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -31,8 +30,6 @@ namespace android {
namespace nn {
namespace prelu {
-using namespace hal;
-
constexpr char kOperationName[] = "PRELU";
constexpr uint32_t kNumInputs = 2;
diff --git a/nn/common/operations/Pooling.cpp b/nn/common/operations/Pooling.cpp
index 3ffa70fb9..62594c783 100644
--- a/nn/common/operations/Pooling.cpp
+++ b/nn/common/operations/Pooling.cpp
@@ -22,15 +22,12 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace pooling {
constexpr uint32_t kInputTensor = 0;
@@ -334,8 +331,7 @@ bool validate(OperationType opType, const IOperationValidationContext* context)
OperandType::INT32,
};
} else {
- NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation "
- << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << opType;
}
if (inputCount >= 10) {
diff --git a/nn/common/operations/Pow.cpp b/nn/common/operations/Pow.cpp
index 40c4adf02..03892a230 100644
--- a/nn/common/operations/Pow.cpp
+++ b/nn/common/operations/Pow.cpp
@@ -17,11 +17,11 @@
#define LOG_TAG "Operations"
#include "Pow.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationsUtils.h"
#include <cmath>
+#include <vector>
namespace android {
namespace nn {
@@ -29,8 +29,6 @@ namespace pow {
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* baseData, const Shape& baseShape, const T* exponentData,
const Shape& exponentShape, T* outputData, const Shape& outputShape) {
@@ -81,7 +79,7 @@ bool eval(const void* baseData, const Shape& baseShape, const void* exponentData
reinterpret_cast<float*>(outputData), outputShape);
} break;
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(baseShape.type);
+ LOG(ERROR) << "Unsupported data type: " << baseShape.type;
return false;
}
}
diff --git a/nn/common/operations/QLSTM.cpp b/nn/common/operations/QLSTM.cpp
index 3b2dd0508..68a9489fc 100644
--- a/nn/common/operations/QLSTM.cpp
+++ b/nn/common/operations/QLSTM.cpp
@@ -101,8 +101,6 @@ inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor
} // namespace
-using hal::OperandType;
-
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
diff --git a/nn/common/operations/Quantize.cpp b/nn/common/operations/Quantize.cpp
index fa04bdd01..943a33dcb 100644
--- a/nn/common/operations/Quantize.cpp
+++ b/nn/common/operations/Quantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {
NNTRACE_COMP("quantizeToQuant8");
@@ -75,10 +72,10 @@ bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
inputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported input operand type for QUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for QUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM ||
outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported output operand type for QUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for QUANTIZE op: " << outputType;
if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
return validateHalVersion(context, HalVersion::V1_3);
} else {
@@ -121,8 +118,7 @@ bool execute(IOperationExecutionContext* context) {
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. (input type: "
- << toString(inputType)
- << " output type: " << toString(context->getOutputType(kOutputTensor))
+ << inputType << " output type: " << context->getOutputType(kOutputTensor)
<< ")";
}
diff --git a/nn/common/operations/QuantizedLSTM.cpp b/nn/common/operations/QuantizedLSTM.cpp
index e059026ff..f07bc0a40 100644
--- a/nn/common/operations/QuantizedLSTM.cpp
+++ b/nn/common/operations/QuantizedLSTM.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
@@ -34,8 +33,6 @@ namespace nn {
namespace {
-using namespace hal;
-
template <typename T>
inline T* GetBuffer(RunTimeOperandInfo* operand) {
return reinterpret_cast<T*>(operand->buffer);
diff --git a/nn/common/operations/QuantizedLSTM.h b/nn/common/operations/QuantizedLSTM.h
index 76e74c638..61963c03f 100644
--- a/nn/common/operations/QuantizedLSTM.h
+++ b/nn/common/operations/QuantizedLSTM.h
@@ -28,9 +28,9 @@ struct RunTimeOperandInfo;
class QuantizedLSTMCell {
public:
- QuantizedLSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ QuantizedLSTMCell(const Operation& operation, RunTimeOperandInfo* operands);
- static bool prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* cellStateShape, Shape* outputShape);
bool eval();
diff --git a/nn/common/operations/RNN.cpp b/nn/common/operations/RNN.cpp
index 259c0915e..f584f0e1b 100644
--- a/nn/common/operations/RNN.cpp
+++ b/nn/common/operations/RNN.cpp
@@ -22,15 +22,12 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
RNN::RNN(const Operation& operation, RunTimeOperandInfo* operands) {
NNTRACE_TRANS("RNN::RNN");
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/nn/common/operations/RNN.h b/nn/common/operations/RNN.h
index 245eb1df3..0a5765b2e 100644
--- a/nn/common/operations/RNN.h
+++ b/nn/common/operations/RNN.h
@@ -20,7 +20,7 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -30,9 +30,9 @@ struct Shape;
class RNN {
public:
- RNN(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ RNN(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* hiddenStateShape, Shape* outputShape);
bool Eval();
diff --git a/nn/common/operations/Rank.cpp b/nn/common/operations/Rank.cpp
index 5f744375d..8a6931beb 100644
--- a/nn/common/operations/Rank.cpp
+++ b/nn/common/operations/Rank.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Utils.h"
@@ -34,19 +33,19 @@ constexpr uint32_t kOutputScalar = 0;
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
- hal::OperandType inputType = context->getInputType(kInputTensor);
- NN_RET_CHECK(inputType == hal::OperandType::TENSOR_FLOAT16 ||
- inputType == hal::OperandType::TENSOR_FLOAT32 ||
- inputType == hal::OperandType::TENSOR_INT32 ||
- inputType == hal::OperandType::TENSOR_QUANT8_ASYMM ||
- inputType == hal::OperandType::TENSOR_QUANT16_SYMM ||
- inputType == hal::OperandType::TENSOR_BOOL8 ||
- inputType == hal::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
- inputType == hal::OperandType::TENSOR_QUANT16_ASYMM ||
- inputType == hal::OperandType::TENSOR_QUANT8_SYMM ||
- inputType == hal::OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Incorrect input type for a RANK op: " << toString(inputType);
- NN_RET_CHECK(validateOutputTypes(context, {hal::OperandType::INT32}));
+ OperandType inputType = context->getInputType(kInputTensor);
+ NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
+ inputType == OperandType::TENSOR_FLOAT32 ||
+ inputType == OperandType::TENSOR_INT32 ||
+ inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+ inputType == OperandType::TENSOR_QUANT16_SYMM ||
+ inputType == OperandType::TENSOR_BOOL8 ||
+ inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+ inputType == OperandType::TENSOR_QUANT16_ASYMM ||
+ inputType == OperandType::TENSOR_QUANT8_SYMM ||
+ inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
+ << "Incorrect input type for a RANK op: " << inputType;
+ NN_RET_CHECK(validateOutputTypes(context, {OperandType::INT32}));
return validateHalVersion(context, HalVersion::V1_3);
}
diff --git a/nn/common/operations/Reduce.cpp b/nn/common/operations/Reduce.cpp
index 220a4dcad..c56771cc3 100644
--- a/nn/common/operations/Reduce.cpp
+++ b/nn/common/operations/Reduce.cpp
@@ -22,7 +22,6 @@
#include <limits>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@ constexpr _Float16 kFloat16Lowest = -kFloat16Max;
namespace {
-using namespace hal;
-
template <typename T>
inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) {
const Shape inputShape = context->getInputShape(kInputTensor);
diff --git a/nn/common/operations/ResizeImageOps.cpp b/nn/common/operations/ResizeImageOps.cpp
index c33abaf54..90420994c 100644
--- a/nn/common/operations/ResizeImageOps.cpp
+++ b/nn/common/operations/ResizeImageOps.cpp
@@ -23,15 +23,12 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
namespace android {
namespace nn {
-using namespace hal;
-
namespace resize_image {
constexpr uint32_t kNumInputs = 4;
@@ -178,7 +175,7 @@ bool validate(OperationType opType, const IOperationValidationContext* context)
} else if (opType == OperationType::RESIZE_NEAREST_NEIGHBOR) {
NN_RET_CHECK(numInputs >= kNumInputs && numInputs <= kNumInputs + kNumOptionalInputs);
} else {
- NN_RET_CHECK_FAIL() << "Unsupported operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported operation " << opType;
}
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
auto inputType = context->getInputType(kInputTensor);
@@ -188,7 +185,7 @@ bool validate(OperationType opType, const IOperationValidationContext* context)
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported tensor type for operation " << getOperationName(opType);
+ << "Unsupported tensor type for operation " << opType;
if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) {
NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
}
@@ -258,7 +255,7 @@ bool prepare(OperationType opType, IOperationExecutionContext* context) {
static_cast<float>(inWidth) *
static_cast<float>(context->getInputValue<_Float16>(kOutputWidthParamScalar)));
} else {
- NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << opType;
}
NN_RET_CHECK_GT(height, 0);
NN_RET_CHECK_GT(width, 0);
@@ -304,8 +301,7 @@ bool execute(OperationType opType, IOperationExecutionContext* context) {
context->getOutputShape(kOutputTensor));
default:
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation "
- << getOperationName(opType);
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType;
}
}
diff --git a/nn/common/operations/RoiAlign.cpp b/nn/common/operations/RoiAlign.cpp
index b9daf45a3..01008cc9d 100644
--- a/nn/common/operations/RoiAlign.cpp
+++ b/nn/common/operations/RoiAlign.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -51,8 +50,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T_Input, typename T_Roi>
inline bool roiAlignNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/nn/common/operations/RoiPooling.cpp b/nn/common/operations/RoiPooling.cpp
index a4f8214b7..373669aab 100644
--- a/nn/common/operations/RoiPooling.cpp
+++ b/nn/common/operations/RoiPooling.cpp
@@ -22,7 +22,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
#include "Tracing.h"
@@ -48,8 +47,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T_Input, typename T_Roi>
inline bool roiPoolingNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData,
const Shape& roiShape, const int32_t* batchSplitData,
diff --git a/nn/common/operations/SVDF.cpp b/nn/common/operations/SVDF.cpp
index 83148389a..953e2a84f 100644
--- a/nn/common/operations/SVDF.cpp
+++ b/nn/common/operations/SVDF.cpp
@@ -20,7 +20,6 @@
#include "CpuExecutor.h"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include <algorithm>
#include <vector>
@@ -29,8 +28,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
SVDF::SVDF(const Operation& operation, RunTimeOperandInfo* operands) {
NNTRACE_TRANS("SVDF::SVDF");
input_ = GetInput(operation, operands, kInputTensor);
diff --git a/nn/common/operations/SVDF.h b/nn/common/operations/SVDF.h
index ca9b54e13..da185687c 100644
--- a/nn/common/operations/SVDF.h
+++ b/nn/common/operations/SVDF.h
@@ -23,7 +23,7 @@
#include <cmath>
#include <vector>
-#include "HalInterfaces.h"
+#include "nnapi/Types.h"
namespace android {
namespace nn {
@@ -38,10 +38,10 @@ struct Shape;
class SVDF {
public:
- SVDF(const hal::Operation& operation, RunTimeOperandInfo* operands);
+ SVDF(const Operation& operation, RunTimeOperandInfo* operands);
- static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands,
- Shape* stateShape, Shape* outputShape);
+ static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* stateShape,
+ Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/nn/common/operations/Select.cpp b/nn/common/operations/Select.cpp
index 202659560..91053896d 100644
--- a/nn/common/operations/Select.cpp
+++ b/nn/common/operations/Select.cpp
@@ -16,7 +16,6 @@
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -35,8 +34,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool compute(const bool8* conditionData, const Shape& conditionShape, const T* aData,
const Shape& aShape, const T* bData, const Shape& bShape, T* outputData,
@@ -78,7 +75,7 @@ bool validate(const IOperationValidationContext* context) {
inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for select op: " << toString(inputType);
+ << "Unsupported input operand type for select op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_BOOL8, inputType, inputType}));
NN_RET_CHECK(validateOutputTypes(context, {inputType}));
return validateHalVersion(context, HalVersion::V1_2);
diff --git a/nn/common/operations/Slice.cpp b/nn/common/operations/Slice.cpp
index 3c4f2faa3..1b5a493f7 100644
--- a/nn/common/operations/Slice.cpp
+++ b/nn/common/operations/Slice.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "Operations"
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
@@ -37,8 +36,6 @@ constexpr uint32_t kSizeTensor = 2;
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-using namespace hal;
-
namespace {
template <typename T>
diff --git a/nn/common/operations/Softmax.cpp b/nn/common/operations/Softmax.cpp
index a986390e1..bb85c0b66 100644
--- a/nn/common/operations/Softmax.cpp
+++ b/nn/common/operations/Softmax.cpp
@@ -25,7 +25,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta,
int32_t axis, float* outputData, const Shape& outputShape) {
NNTRACE_TRANS("softmaxFloatSlow32");
diff --git a/nn/common/operations/Squeeze.cpp b/nn/common/operations/Squeeze.cpp
index 276461d1e..d7345505c 100644
--- a/nn/common/operations/Squeeze.cpp
+++ b/nn/common/operations/Squeeze.cpp
@@ -20,7 +20,6 @@
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"
@@ -36,8 +35,6 @@ constexpr uint32_t kSqueezeDims = 1;
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-using namespace hal;
-
bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
@@ -46,7 +43,7 @@ bool validate(const IOperationValidationContext* context) {
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for SQUEEZE op: " << toString(inputType);
+ << "Unsupported input operand type for SQUEEZE op: " << inputType;
HalVersion minSupportedHalVersion;
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/nn/common/operations/StridedSlice.cpp b/nn/common/operations/StridedSlice.cpp
index 5ff5aeca8..3bb3a829d 100644
--- a/nn/common/operations/StridedSlice.cpp
+++ b/nn/common/operations/StridedSlice.cpp
@@ -23,7 +23,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool compute(const T* inputData, const Shape& inputShape, const int32_t* beginData,
const int32_t* endData, const int32_t* stridesData, int32_t beginMask, int32_t endMask,
@@ -107,7 +104,7 @@ bool validate(const IOperationValidationContext* context) {
inputType == OperandType::TENSOR_FLOAT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for STRIDED_SLICE op: " << toString(inputType);
+ << "Unsupported input operand type for STRIDED_SLICE op: " << inputType;
HalVersion minSupportedHalVersion;
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
diff --git a/nn/common/operations/Tile.cpp b/nn/common/operations/Tile.cpp
index 517d75e7a..af17df1b9 100644
--- a/nn/common/operations/Tile.cpp
+++ b/nn/common/operations/Tile.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include "Tile.h"
-#include "HalInterfaces.h"
#include "Tracing.h"
namespace android {
@@ -29,8 +28,6 @@ namespace tile {
namespace {
-using namespace hal;
-
template <typename T>
void CopyMultipleTimes(const T* in_data, int32_t in_size, int32_t multiplier, T* out_data) {
for (int i = 0; i < multiplier; ++i) {
diff --git a/nn/common/operations/TopK_V2.cpp b/nn/common/operations/TopK_V2.cpp
index e005b9a33..9e4ceeda8 100644
--- a/nn/common/operations/TopK_V2.cpp
+++ b/nn/common/operations/TopK_V2.cpp
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "OperationsUtils.h"
@@ -38,8 +37,6 @@ constexpr uint32_t kOutputIndicesTensor = 1;
namespace {
-using namespace hal;
-
template <typename T>
bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t k, T* valuesData,
int32_t* indicesData) {
@@ -85,7 +82,7 @@ bool validate(const IOperationValidationContext* context) {
inputType == OperandType::TENSOR_INT32 ||
inputType == OperandType::TENSOR_QUANT8_ASYMM ||
inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported input operand type for select op: " << toString(inputType);
+ << "Unsupported input operand type for select op: " << inputType;
NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32}));
NN_RET_CHECK(validateOutputTypes(context, {inputType, OperandType::TENSOR_INT32}));
HalVersion minSupportedHalVersion = HalVersion::V1_2;
@@ -132,7 +129,7 @@ bool execute(IOperationExecutionContext* context) {
return executeTyped<int8_t>(context);
} break;
default: {
- LOG(ERROR) << "Unsupported data type: " << toString(inputShape.type);
+ LOG(ERROR) << "Unsupported data type: " << inputShape.type;
return false;
}
}
diff --git a/nn/common/operations/Transpose.cpp b/nn/common/operations/Transpose.cpp
index ff70f9e8b..423b3ded6 100644
--- a/nn/common/operations/Transpose.cpp
+++ b/nn/common/operations/Transpose.cpp
@@ -19,7 +19,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
@@ -42,8 +41,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm,
const Shape& permShape, T* outputData, const Shape& outputShape) {
diff --git a/nn/common/operations/TransposeConv2D.cpp b/nn/common/operations/TransposeConv2D.cpp
index d67a473e6..0ee5d044c 100644
--- a/nn/common/operations/TransposeConv2D.cpp
+++ b/nn/common/operations/TransposeConv2D.cpp
@@ -25,7 +25,6 @@
#include <vector>
#include "CpuOperationUtils.h"
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -46,8 +45,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
// If possible, we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
@@ -452,7 +449,9 @@ bool validate(const IOperationValidationContext* context) {
filterType == inputType)
<< "Unsupported filter tensor type for operation " << kOperationName;
if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
+ NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .channelDim,
0)
<< "Unsupported filter tensor channel dimension for operation "
<< kOperationName;
@@ -570,7 +569,9 @@ bool execute(IOperationExecutionContext* context) {
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param,
context->getOutputBuffer<uint8_t>(kOutputTensor),
@@ -595,7 +596,9 @@ bool execute(IOperationExecutionContext* context) {
context->getInputShape(kInputTensor),
context->getInputBuffer<int8_t>(kFilterTensor),
context->getInputShape(kFilterTensor),
- context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
+ std::get<Operand::SymmPerChannelQuantParams>(
+ context->getInputExtraParams(kFilterTensor))
+ .scales.data(),
context->getInputBuffer<int32_t>(kBiasTensor),
context->getInputShape(kBiasTensor), param,
context->getOutputBuffer<int8_t>(kOutputTensor),
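
The validate() and execute() hunks above replace the HIDL safe_union accessor channelQuant() with std::get over the canonical Operand::ExtraParams, which this patch treats as a std::variant alternative. A self-contained sketch of the access pattern, with hypothetical stand-ins for the canonical types:

    #include <cstdint>
    #include <variant>
    #include <vector>

    // Hypothetical stand-ins for the canonical types this change assumes.
    struct SymmPerChannelQuantParams {
        std::vector<float> scales;
        uint32_t channelDim;
    };
    using ExtraParams = std::variant<std::monostate, SymmPerChannelQuantParams>;

    // std::get throws std::bad_variant_access when the alternative is
    // inactive, which is why validate() checks the filter operand type
    // before any of these accesses are reached.
    uint32_t channelDimOf(const ExtraParams& params) {
        return std::get<SymmPerChannelQuantParams>(params).channelDim;
    }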
diff --git a/nn/common/operations/UnidirectionalSequenceLSTM.cpp b/nn/common/operations/UnidirectionalSequenceLSTM.cpp
index 03854f65f..9a00e1f01 100644
--- a/nn/common/operations/UnidirectionalSequenceLSTM.cpp
+++ b/nn/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -18,7 +18,6 @@
#include <vector>
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "LSTM.h"
#include "OperationResolver.h"
@@ -88,8 +87,6 @@ constexpr uint32_t kCellStateOutTensor = 2;
namespace {
-using namespace hal;
-
inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) {
return context->getInputBuffer(tensor) != nullptr;
}
@@ -157,7 +154,7 @@ bool validate(const IOperationValidationContext* context) {
} else {
NN_RET_CHECK_FAIL()
<< "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_LSTM op: "
- << toString(inputType);
+ << inputType;
}
HalVersion minHalVersionSupported = HalVersion::V1_2;
if (context->getNumOutputs() == kNumOutputsWithState) {
diff --git a/nn/common/operations/UnidirectionalSequenceRNN.cpp b/nn/common/operations/UnidirectionalSequenceRNN.cpp
index 273b7017a..aa79739ec 100644
--- a/nn/common/operations/UnidirectionalSequenceRNN.cpp
+++ b/nn/common/operations/UnidirectionalSequenceRNN.cpp
@@ -20,9 +20,9 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "OperationResolver.h"
#include "RNN.h"
+#include "nnapi/TypeUtils.h"
namespace android {
namespace nn {
@@ -44,8 +44,6 @@ constexpr uint32_t kStateOutputTensor = 1;
namespace {
-using namespace hal;
-
template <typename T>
void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) {
const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0);
@@ -135,7 +133,7 @@ bool validate(const IOperationValidationContext* context) {
OperandType inputType = context->getInputType(kInputTensor);
if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) {
LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: "
- << toString(inputType);
+ << inputType;
return false;
}
NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, inputType, inputType, inputType,
diff --git a/nn/driver/sample/SampleDriver.cpp b/nn/driver/sample/SampleDriver.cpp
index b6303acd1..61e2b8bfb 100644
--- a/nn/driver/sample/SampleDriver.cpp
+++ b/nn/driver/sample/SampleDriver.cpp
@@ -47,8 +47,6 @@ namespace sample_driver {
namespace {
-using namespace hal;
-
using time_point = std::chrono::steady_clock::time_point;
auto now() {
@@ -61,174 +59,185 @@ auto microsecondsDuration(decltype(now()) end, decltype(now()) start) {
} // namespace
-static const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+static const V1_2::Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-Return<void> SampleDriver::getCapabilities(getCapabilities_cb cb) {
+hardware::Return<void> SampleDriver::getCapabilities(getCapabilities_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getCapabilities");
- return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
- // TODO(dgross): Do we need to check compliantWithV1_0(capabilities)?
- cb(convertToV1_0(error), convertToV1_0(capabilities));
- });
+ return getCapabilities_1_3(
+ [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) {
+ // TODO(dgross): Do we need to check compliantWithV1_0(capabilities)?
+ cb(convertToV1_0(error), convertToV1_0(capabilities));
+ });
}
-Return<void> SampleDriver::getCapabilities_1_1(getCapabilities_1_1_cb cb) {
+hardware::Return<void> SampleDriver::getCapabilities_1_1(getCapabilities_1_1_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getCapabilities_1_1");
- return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
- // TODO(dgross): Do we need to check compliantWithV1_1(capabilities)?
- cb(convertToV1_0(error), convertToV1_1(capabilities));
- });
+ return getCapabilities_1_3(
+ [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) {
+ // TODO(dgross): Do we need to check compliantWithV1_1(capabilities)?
+ cb(convertToV1_0(error), convertToV1_1(capabilities));
+ });
}
-Return<void> SampleDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+hardware::Return<void> SampleDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getCapabilities_1_2");
- return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
- // TODO(dgross): Do we need to check compliantWithV1_2(capabilities)?
- cb(convertToV1_0(error), convertToV1_2(capabilities));
- });
+ return getCapabilities_1_3(
+ [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) {
+ // TODO(dgross): Do we need to check compliantWithV1_2(capabilities)?
+ cb(convertToV1_0(error), convertToV1_2(capabilities));
+ });
}
-Return<void> SampleDriver::getVersionString(getVersionString_cb cb) {
+hardware::Return<void> SampleDriver::getVersionString(getVersionString_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getVersionString");
cb(V1_0::ErrorStatus::NONE, "JUST_AN_EXAMPLE");
- return Void();
+ return hardware::Void();
}
-Return<void> SampleDriver::getType(getType_cb cb) {
+hardware::Return<void> SampleDriver::getType(getType_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getType");
cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU);
- return Void();
+ return hardware::Void();
}
-Return<void> SampleDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
+hardware::Return<void> SampleDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getSupportedExtensions");
cb(V1_0::ErrorStatus::NONE, {/* No extensions. */});
- return Void();
+ return hardware::Void();
}
-Return<void> SampleDriver::getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb cb) {
+hardware::Return<void> SampleDriver::getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
"SampleDriver::getSupportedOperations");
if (!validateModel(model)) {
VLOG(DRIVER) << "getSupportedOperations";
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
- return getSupportedOperations_1_3(convertToV1_3(model),
- [&](ErrorStatus status, const hidl_vec<bool>& supported) {
- cb(convertToV1_0(status), supported);
- });
+ return getSupportedOperations_1_3(
+ convertToV1_3(model),
+ [&](V1_3::ErrorStatus status, const hardware::hidl_vec<bool>& supported) {
+ cb(convertToV1_0(status), supported);
+ });
}
-Return<void> SampleDriver::getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb cb) {
+hardware::Return<void> SampleDriver::getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
"SampleDriver::getSupportedOperations_1_1");
if (!validateModel(model)) {
VLOG(DRIVER) << "getSupportedOperations_1_1";
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
- return getSupportedOperations_1_3(convertToV1_3(model),
- [&](ErrorStatus status, const hidl_vec<bool>& supported) {
- cb(convertToV1_0(status), supported);
- });
+ return getSupportedOperations_1_3(
+ convertToV1_3(model),
+ [&](V1_3::ErrorStatus status, const hardware::hidl_vec<bool>& supported) {
+ cb(convertToV1_0(status), supported);
+ });
}
-Return<void> SampleDriver::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
+hardware::Return<void> SampleDriver::getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
"SampleDriver::getSupportedOperations_1_2");
if (!validateModel(model)) {
VLOG(DRIVER) << "getSupportedOperations_1_2";
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
- return getSupportedOperations_1_3(convertToV1_3(model),
- [&](ErrorStatus status, const hidl_vec<bool>& supported) {
- cb(convertToV1_0(status), supported);
- });
+ return getSupportedOperations_1_3(
+ convertToV1_3(model),
+ [&](V1_3::ErrorStatus status, const hardware::hidl_vec<bool>& supported) {
+ cb(convertToV1_0(status), supported);
+ });
}
-Return<void> SampleDriver::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) {
+hardware::Return<void> SampleDriver::getNumberOfCacheFilesNeeded(
+ getNumberOfCacheFilesNeeded_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getNumberOfCacheFilesNeeded");
    // Set both numbers to 0 to indicate that caching is not supported.
cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/0, /*numDataCache=*/0);
- return Void();
+ return hardware::Void();
}
-Return<V1_0::ErrorStatus> SampleDriver::prepareModel(
+hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModel(
const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel");
- const ErrorStatus status = prepareModelBase(
- model, this, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, callback);
+ const V1_3::ErrorStatus status =
+ prepareModelBase(model, this, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER,
+ kDefaultPriority13, {}, callback);
return convertToV1_0(status);
}
-Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_1");
- const ErrorStatus status =
- prepareModelBase(model, this, preference, kDefaultPriority, {}, callback);
+ const V1_3::ErrorStatus status =
+ prepareModelBase(model, this, preference, kDefaultPriority13, {}, callback);
return convertToV1_0(status);
}
-Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_2");
- const ErrorStatus status =
- prepareModelBase(model, this, preference, kDefaultPriority, {}, callback);
+ const V1_3::ErrorStatus status =
+ prepareModelBase(model, this, preference, kDefaultPriority13, {}, callback);
return convertToV1_0(status);
}
-Return<V1_3::ErrorStatus> SampleDriver::prepareModel_1_3(
- const V1_3::Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+hardware::Return<V1_3::ErrorStatus> SampleDriver::prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline, const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_3");
return prepareModelBase(model, this, preference, priority, deadline, callback);
}
-Return<V1_0::ErrorStatus> SampleDriver::prepareModelFromCache(
- const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
"SampleDriver::prepareModelFromCache");
- notify(callback, ErrorStatus::GENERAL_FAILURE, nullptr);
+ notify(callback, V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
-Return<ErrorStatus> SampleDriver::prepareModelFromCache_1_3(
- const OptionalTimePoint& /*deadline*/, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+hardware::Return<V1_3::ErrorStatus> SampleDriver::prepareModelFromCache_1_3(
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
"SampleDriver::prepareModelFromCache_1_3");
- notify(callback, ErrorStatus::GENERAL_FAILURE, nullptr);
- return ErrorStatus::GENERAL_FAILURE;
+ notify(callback, V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
-Return<DeviceStatus> SampleDriver::getStatus() {
+hardware::Return<V1_0::DeviceStatus> SampleDriver::getStatus() {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_UNSPECIFIED, "SampleDriver::getStatus");
VLOG(DRIVER) << "getStatus()";
- return DeviceStatus::AVAILABLE;
+ return V1_0::DeviceStatus::AVAILABLE;
}
// Safely downcast an IPreparedModel object to SamplePreparedModel.
// This function will return nullptr if the IPreparedModel object did not originate from the
// sample driver process.
static const SamplePreparedModel* castToSamplePreparedModel(
- const sp<IPreparedModel>& preparedModel) {
+ const sp<V1_3::IPreparedModel>& preparedModel) {
if (preparedModel->isRemote()) {
return nullptr;
} else {
@@ -238,10 +247,11 @@ static const SamplePreparedModel* castToSamplePreparedModel(
}
}
-Return<void> SampleDriver::allocate(const V1_3::BufferDesc& desc,
- const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<V1_3::BufferRole>& inputRoles,
- const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) {
+hardware::Return<void> SampleDriver::allocate(
+ const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) {
constexpr uint32_t kInvalidBufferToken = 0;
VLOG(DRIVER) << "SampleDriver::allocate";
@@ -258,14 +268,14 @@ Return<void> SampleDriver::allocate(const V1_3::BufferDesc& desc,
if (!validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles, getModel, &roles,
&operand)) {
LOG(ERROR) << "SampleDriver::allocate -- validation failed.";
- cb(ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken);
- return Void();
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken);
+ return hardware::Void();
}
if (isExtensionOperandType(operand.type)) {
LOG(ERROR) << "SampleDriver::allocate -- does not support extension type.";
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
+ return hardware::Void();
}
// TODO(xusongw): Support allocating buffers with unknown dimensions or rank.
@@ -274,29 +284,29 @@ Return<void> SampleDriver::allocate(const V1_3::BufferDesc& desc,
<< ", dimensions = " << toString(operand.dimensions) << ", size = " << size;
if (size == 0) {
LOG(ERROR) << "SampleDriver::allocate -- does not support dynamic output shape.";
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
+ return hardware::Void();
}
- auto bufferWrapper = ManagedBuffer::create(size, std::move(roles), std::move(operand));
+ auto bufferWrapper = ManagedBuffer::create(size, std::move(roles), uncheckedConvert(operand));
if (bufferWrapper == nullptr) {
LOG(ERROR) << "SampleDriver::allocate -- not enough memory.";
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
+ return hardware::Void();
}
auto token = mBufferTracker->add(bufferWrapper);
if (token == nullptr) {
LOG(ERROR) << "SampleDriver::allocate -- BufferTracker returned invalid token.";
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken);
+ return hardware::Void();
}
const uint32_t tokenValue = token->get();
sp<SampleBuffer> sampleBuffer = new SampleBuffer(std::move(bufferWrapper), std::move(token));
VLOG(DRIVER) << "SampleDriver::allocate -- successfully allocates the requested memory";
- cb(ErrorStatus::NONE, std::move(sampleBuffer), tokenValue);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, std::move(sampleBuffer), tokenValue);
+ return hardware::Void();
}
int SampleDriver::run() {
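
From here on, the SampleDriver hunks repeat a single conversion contract: HIDL types (hardware::hidl_memory, V1_3::Model, V1_3::Request) stay at the binder surface, and uncheckedConvert bridges them to canonical types exactly where CpuExecutor or RunTimePoolInfo is entered. A toy, self-contained sketch of that boundary discipline (names are illustrative; the real uncheckedConvert overloads are the ones this patch calls):

    #include <cassert>

    // Toy stand-ins: HalRequest models the V1_3 HIDL type, Request the
    // canonical one; uncheckedConvert is assumed total here.
    struct HalRequest { int numInputs; };
    struct Request { int numInputs; };

    Request uncheckedConvert(const HalRequest& halRequest) {
        return {halRequest.numInputs};
    }

    // The core only ever sees the canonical type.
    int run(const Request& request) { return request.numInputs; }

    int serveHalCall(const HalRequest& halRequest) {
        // Conversion happens exactly once, at the service boundary.
        return run(uncheckedConvert(halRequest));
    }

    int main() {
        assert(serveHalCall({3}) == 3);
        return 0;
    }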
@@ -318,43 +328,45 @@ static void copyRunTimePoolInfos(const RunTimePoolInfo& srcPool, const RunTimePo
dstPool.flush();
}
-Return<ErrorStatus> SampleBuffer::copyTo(const hidl_memory& dst) {
- const auto dstPool = RunTimePoolInfo::createFromHidlMemory(dst);
+hardware::Return<V1_3::ErrorStatus> SampleBuffer::copyTo(const hardware::hidl_memory& dst) {
+ const auto dstPool = RunTimePoolInfo::createFromMemory(uncheckedConvert(dst));
if (!dstPool.has_value()) {
LOG(ERROR) << "SampleBuffer::copyTo -- unable to map dst memory.";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
- const ErrorStatus validationStatus = kBuffer->validateCopyTo(dstPool->getSize());
- if (validationStatus != ErrorStatus::NONE) {
+ const V1_3::ErrorStatus validationStatus =
+ convertToV1_3(kBuffer->validateCopyTo(dstPool->getSize()));
+ if (validationStatus != V1_3::ErrorStatus::NONE) {
return validationStatus;
}
const auto srcPool = kBuffer->createRunTimePoolInfo();
copyRunTimePoolInfos(srcPool, dstPool.value());
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
-static ErrorStatus copyFromInternal(const hidl_memory& src, const hidl_vec<uint32_t>& dimensions,
- const std::shared_ptr<ManagedBuffer>& bufferWrapper) {
+static V1_3::ErrorStatus copyFromInternal(const hardware::hidl_memory& src,
+ const hardware::hidl_vec<uint32_t>& dimensions,
+ const std::shared_ptr<ManagedBuffer>& bufferWrapper) {
CHECK(bufferWrapper != nullptr);
- const auto srcPool = RunTimePoolInfo::createFromHidlMemory(src);
+ const auto srcPool = RunTimePoolInfo::createFromMemory(uncheckedConvert(src));
if (!srcPool.has_value()) {
LOG(ERROR) << "SampleBuffer::copyFrom -- unable to map src memory.";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
- const ErrorStatus validationStatus =
- bufferWrapper->validateCopyFrom(dimensions, srcPool->getSize());
- if (validationStatus != ErrorStatus::NONE) {
+ const V1_3::ErrorStatus validationStatus =
+ convertToV1_3(bufferWrapper->validateCopyFrom(dimensions, srcPool->getSize()));
+ if (validationStatus != V1_3::ErrorStatus::NONE) {
return validationStatus;
}
const auto dstPool = bufferWrapper->createRunTimePoolInfo();
copyRunTimePoolInfos(srcPool.value(), dstPool);
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
-Return<ErrorStatus> SampleBuffer::copyFrom(const hidl_memory& src,
- const hidl_vec<uint32_t>& dimensions) {
+hardware::Return<V1_3::ErrorStatus> SampleBuffer::copyFrom(
+ const hardware::hidl_memory& src, const hardware::hidl_vec<uint32_t>& dimensions) {
const auto status = copyFromInternal(src, dimensions, kBuffer);
- if (status == ErrorStatus::NONE) {
+ if (status == V1_3::ErrorStatus::NONE) {
kBuffer->updateDimensions(dimensions);
kBuffer->setInitialized(true);
} else {
@@ -364,12 +376,12 @@ Return<ErrorStatus> SampleBuffer::copyFrom(const hidl_memory& src,
}
bool SamplePreparedModel::initialize() {
- return setRunTimePoolInfosFromHidlMemories(&mPoolInfos, mModel.pools);
+ return setRunTimePoolInfosFromCanonicalMemories(&mPoolInfos, uncheckedConvert(mModel.pools));
}
-static std::tuple<ErrorStatus, std::vector<RunTimePoolInfo>,
+static std::tuple<V1_3::ErrorStatus, std::vector<RunTimePoolInfo>,
std::vector<std::shared_ptr<ManagedBuffer>>>
-createRunTimePoolInfos(const Request& request, const SampleDriver& driver,
+createRunTimePoolInfos(const V1_3::Request& request, const SampleDriver& driver,
const SamplePreparedModel* preparedModel) {
std::vector<RunTimePoolInfo> requestPoolInfos;
std::vector<std::shared_ptr<ManagedBuffer>> bufferWrappers;
@@ -378,23 +390,24 @@ createRunTimePoolInfos(const Request& request, const SampleDriver& driver,
for (uint32_t i = 0; i < request.pools.size(); i++) {
auto& pool = request.pools[i];
switch (pool.getDiscriminator()) {
- case Request::MemoryPool::hidl_discriminator::hidlMemory: {
- auto buffer = RunTimePoolInfo::createFromHidlMemory(pool.hidlMemory());
+ case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: {
+ auto buffer =
+ RunTimePoolInfo::createFromMemory(uncheckedConvert(pool.hidlMemory()));
if (!buffer.has_value()) {
LOG(ERROR) << "createRuntimeMemoriesFromMemoryPools -- could not map pools";
- return {ErrorStatus::GENERAL_FAILURE, {}, {}};
+ return {V1_3::ErrorStatus::GENERAL_FAILURE, {}, {}};
}
requestPoolInfos.push_back(std::move(*buffer));
bufferWrappers.push_back(nullptr);
} break;
- case Request::MemoryPool::hidl_discriminator::token: {
+ case V1_3::Request::MemoryPool::hidl_discriminator::token: {
auto bufferWrapper = driver.getBufferTracker()->get(pool.token());
if (bufferWrapper == nullptr) {
- return {ErrorStatus::INVALID_ARGUMENT, {}, {}};
+ return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, {}};
}
- const auto validationStatus =
- bufferWrapper->validateRequest(i, request, preparedModel);
- if (validationStatus != ErrorStatus::NONE) {
+ const auto validationStatus = convertToV1_3(bufferWrapper->validateRequest(
+ i, uncheckedConvert(request), preparedModel));
+ if (validationStatus != V1_3::ErrorStatus::NONE) {
return {validationStatus, {}, {}};
}
requestPoolInfos.push_back(bufferWrapper->createRunTimePoolInfo());
@@ -402,63 +415,63 @@ createRunTimePoolInfos(const Request& request, const SampleDriver& driver,
} break;
}
}
- return {ErrorStatus::NONE, std::move(requestPoolInfos), std::move(bufferWrappers)};
+ return {V1_3::ErrorStatus::NONE, std::move(requestPoolInfos), std::move(bufferWrappers)};
}
-static ErrorStatus updateDeviceMemories(
- ErrorStatus status, const Request& request,
+static V1_3::ErrorStatus updateDeviceMemories(
+ V1_3::ErrorStatus status, const V1_3::Request& request,
const std::vector<std::shared_ptr<ManagedBuffer>>& bufferWrappers,
- const hidl_vec<OutputShape>& outputShapes) {
- if (status == ErrorStatus::NONE) {
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
+ if (status == V1_3::ErrorStatus::NONE) {
for (uint32_t i = 0; i < request.outputs.size(); i++) {
const uint32_t poolIndex = request.outputs[i].location.poolIndex;
const auto& pool = request.pools[poolIndex];
- if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) {
+ if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) {
if (!bufferWrappers[poolIndex]->updateDimensions(outputShapes[i].dimensions)) {
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
}
for (uint32_t i = 0; i < request.outputs.size(); i++) {
const uint32_t poolIndex = request.outputs[i].location.poolIndex;
const auto& pool = request.pools[poolIndex];
- if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) {
+ if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) {
bufferWrappers[poolIndex]->setInitialized(true);
}
}
- } else if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ } else if (status == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
        // If CpuExecutor reports OUTPUT_INSUFFICIENT_SIZE on a device memory, this is because the
// dimensions of the device memory are incorrectly specified. The driver should return
// GENERAL_FAILURE instead in this case.
for (uint32_t i = 0; i < request.outputs.size(); i++) {
const uint32_t poolIndex = request.outputs[i].location.poolIndex;
const auto& pool = request.pools[poolIndex];
- if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) {
+ if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) {
if (!outputShapes[i].isSufficient) {
LOG(ERROR) << "Invalid dimensions for output " << i
<< ": actual shape = " << toString(outputShapes[i].dimensions);
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
template <typename T_IExecutionCallback>
-void asyncExecute(const Request& request, MeasureTiming measure, time_point driverStart,
- const Model& model, const SampleDriver& driver,
+void asyncExecute(const V1_3::Request& request, V1_2::MeasureTiming measure, time_point driverStart,
+ const V1_3::Model& model, const SampleDriver& driver,
const SamplePreparedModel* preparedModel,
const std::vector<RunTimePoolInfo>& poolInfos,
const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const sp<T_IExecutionCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS,
"SampleDriver::asyncExecute");
const auto [poolStatus, requestPoolInfos, bufferWrappers] =
createRunTimePoolInfos(request, driver, preparedModel);
- if (poolStatus != ErrorStatus::NONE) {
+ if (poolStatus != V1_3::ErrorStatus::NONE) {
notify(callback, poolStatus, {}, kNoTiming);
return;
}
@@ -467,32 +480,34 @@ void asyncExecute(const Request& request, MeasureTiming measure, time_point driv
"SampleDriver::asyncExecute");
CpuExecutor executor = driver.getExecutor();
if (loopTimeoutDuration.getDiscriminator() !=
- OptionalTimeoutDuration::hidl_discriminator::none) {
+ V1_3::OptionalTimeoutDuration::hidl_discriminator::none) {
executor.setLoopTimeout(loopTimeoutDuration.nanoseconds());
}
if (deadline.has_value()) {
executor.setDeadline(*deadline);
}
time_point driverEnd, deviceStart, deviceEnd;
- if (measure == MeasureTiming::YES) deviceStart = now();
- int n = executor.run(model, request, poolInfos, requestPoolInfos);
- if (measure == MeasureTiming::YES) deviceEnd = now();
+ if (measure == V1_2::MeasureTiming::YES) deviceStart = now();
+ int n = executor.run(uncheckedConvert(model), uncheckedConvert(request), poolInfos,
+ requestPoolInfos);
+ if (measure == V1_2::MeasureTiming::YES) deviceEnd = now();
VLOG(DRIVER) << "executor.run returned " << n;
- ErrorStatus executionStatus = convertResultCodeToErrorStatus(n);
- hidl_vec<OutputShape> outputShapes = executor.getOutputShapes();
+ V1_3::ErrorStatus executionStatus = convertResultCodeToHalErrorStatus(n);
+ hardware::hidl_vec<V1_2::OutputShape> outputShapes = convertToV1_2(executor.getOutputShapes());
// Update device memory metadata.
- const ErrorStatus updateStatus =
+ const V1_3::ErrorStatus updateStatus =
updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes);
- if (updateStatus != ErrorStatus::NONE) {
+ if (updateStatus != V1_3::ErrorStatus::NONE) {
notify(callback, updateStatus, {}, kNoTiming);
return;
}
- if (measure == MeasureTiming::YES && executionStatus == ErrorStatus::NONE) {
+ if (measure == V1_2::MeasureTiming::YES && executionStatus == V1_3::ErrorStatus::NONE) {
driverEnd = now();
- Timing timing = {.timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
- .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))};
+ V1_2::Timing timing = {
+ .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
+ .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))};
VLOG(DRIVER) << "SampleDriver::asyncExecute timing = " << toString(timing);
notify(callback, executionStatus, outputShapes, timing);
} else {
@@ -501,30 +516,31 @@ void asyncExecute(const Request& request, MeasureTiming measure, time_point driv
}
template <typename T_IExecutionCallback>
-ErrorStatus executeBase(const Request& request, MeasureTiming measure, const Model& model,
- const SampleDriver& driver, const SamplePreparedModel* preparedModel,
- const std::vector<RunTimePoolInfo>& poolInfos,
- const OptionalTimePoint& halDeadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<T_IExecutionCallback>& callback) {
+V1_3::ErrorStatus executeBase(const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::Model& model, const SampleDriver& driver,
+ const SamplePreparedModel* preparedModel,
+ const std::vector<RunTimePoolInfo>& poolInfos,
+ const V1_3::OptionalTimePoint& halDeadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<T_IExecutionCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SampleDriver::executeBase");
VLOG(DRIVER) << "executeBase(" << SHOW_IF_DEBUG(toString(request)) << ")";
time_point driverStart;
- if (measure == MeasureTiming::YES) driverStart = now();
+ if (measure == V1_2::MeasureTiming::YES) driverStart = now();
if (callback.get() == nullptr) {
LOG(ERROR) << "invalid callback passed to executeBase";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (!validateRequest(request, model)) {
- notify(callback, ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
- return ErrorStatus::INVALID_ARGUMENT;
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- notify(callback, ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming);
- return ErrorStatus::NONE;
+ notify(callback, V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming);
+ return V1_3::ErrorStatus::NONE;
}
// This thread is intentionally detached because the sample driver service
@@ -535,57 +551,61 @@ ErrorStatus executeBase(const Request& request, MeasureTiming measure, const Mod
deadline, loopTimeoutDuration, callback);
}).detach();
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
-Return<V1_0::ErrorStatus> SamplePreparedModel::execute(
+hardware::Return<V1_0::ErrorStatus> SamplePreparedModel::execute(
const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) {
- const ErrorStatus status = executeBase(convertToV1_3(request), MeasureTiming::NO, mModel,
- *mDriver, this, mPoolInfos, {}, {}, callback);
+ const V1_3::ErrorStatus status =
+ executeBase(convertToV1_3(request), V1_2::MeasureTiming::NO, mModel, *mDriver, this,
+ mPoolInfos, {}, {}, callback);
return convertToV1_0(status);
}
-Return<V1_0::ErrorStatus> SamplePreparedModel::execute_1_2(
- const V1_0::Request& request, MeasureTiming measure,
+hardware::Return<V1_0::ErrorStatus> SamplePreparedModel::execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback) {
- const ErrorStatus status = executeBase(convertToV1_3(request), measure, mModel, *mDriver, this,
- mPoolInfos, {}, {}, callback);
+ const V1_3::ErrorStatus status = executeBase(convertToV1_3(request), measure, mModel, *mDriver,
+ this, mPoolInfos, {}, {}, callback);
return convertToV1_0(status);
}
-Return<V1_3::ErrorStatus> SamplePreparedModel::execute_1_3(
- const V1_3::Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+hardware::Return<V1_3::ErrorStatus> SamplePreparedModel::execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const sp<V1_3::IExecutionCallback>& callback) {
return executeBase(request, measure, mModel, *mDriver, this, mPoolInfos, deadline,
loopTimeoutDuration, callback);
}
-static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynchronouslyBase(
- const Request& request, MeasureTiming measure, const Model& model,
- const SampleDriver& driver, const SamplePreparedModel* preparedModel,
- const std::vector<RunTimePoolInfo>& poolInfos, const OptionalTimePoint& halDeadline,
- const OptionalTimeoutDuration& loopTimeoutDuration) {
+static std::tuple<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing>
+executeSynchronouslyBase(const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::Model& model, const SampleDriver& driver,
+ const SamplePreparedModel* preparedModel,
+ const std::vector<RunTimePoolInfo>& poolInfos,
+ const V1_3::OptionalTimePoint& halDeadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
"SampleDriver::executeSynchronouslyBase");
VLOG(DRIVER) << "executeSynchronouslyBase(" << SHOW_IF_DEBUG(toString(request)) << ")";
time_point driverStart, driverEnd, deviceStart, deviceEnd;
- if (measure == MeasureTiming::YES) driverStart = now();
+ if (measure == V1_2::MeasureTiming::YES) driverStart = now();
if (!validateRequest(request, model)) {
- return {ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
+ return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- return {ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming};
+ return {V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming};
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS,
"SampleDriver::executeSynchronouslyBase");
const auto [poolStatus, requestPoolInfos, bufferWrappers] =
createRunTimePoolInfos(request, driver, preparedModel);
- if (poolStatus != ErrorStatus::NONE) {
+ if (poolStatus != V1_3::ErrorStatus::NONE) {
return {poolStatus, {}, kNoTiming};
}
@@ -593,93 +613,97 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynchronous
"SampleDriver::executeSynchronouslyBase");
CpuExecutor executor = driver.getExecutor();
if (loopTimeoutDuration.getDiscriminator() !=
- OptionalTimeoutDuration::hidl_discriminator::none) {
+ V1_3::OptionalTimeoutDuration::hidl_discriminator::none) {
executor.setLoopTimeout(loopTimeoutDuration.nanoseconds());
}
if (deadline.has_value()) {
executor.setDeadline(*deadline);
}
- if (measure == MeasureTiming::YES) deviceStart = now();
- int n = executor.run(model, request, poolInfos, requestPoolInfos);
- if (measure == MeasureTiming::YES) deviceEnd = now();
+ if (measure == V1_2::MeasureTiming::YES) deviceStart = now();
+ int n = executor.run(uncheckedConvert(model), uncheckedConvert(request), poolInfos,
+ requestPoolInfos);
+ if (measure == V1_2::MeasureTiming::YES) deviceEnd = now();
VLOG(DRIVER) << "executor.run returned " << n;
- ErrorStatus executionStatus = convertResultCodeToErrorStatus(n);
- hidl_vec<OutputShape> outputShapes = executor.getOutputShapes();
+ V1_3::ErrorStatus executionStatus = convertResultCodeToHalErrorStatus(n);
+ hardware::hidl_vec<V1_2::OutputShape> outputShapes = convertToV1_2(executor.getOutputShapes());
// Update device memory metadata.
- const ErrorStatus updateStatus =
+ const V1_3::ErrorStatus updateStatus =
updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes);
- if (updateStatus != ErrorStatus::NONE) {
+ if (updateStatus != V1_3::ErrorStatus::NONE) {
return {updateStatus, {}, kNoTiming};
}
- if (measure == MeasureTiming::YES && executionStatus == ErrorStatus::NONE) {
+ if (measure == V1_2::MeasureTiming::YES && executionStatus == V1_3::ErrorStatus::NONE) {
driverEnd = now();
- Timing timing = {.timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
- .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))};
+ V1_2::Timing timing = {
+ .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
+ .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))};
VLOG(DRIVER) << "executeSynchronouslyBase timing = " << toString(timing);
return {executionStatus, std::move(outputShapes), timing};
}
return {executionStatus, std::move(outputShapes), kNoTiming};
}
-Return<void> SamplePreparedModel::executeSynchronously(const V1_0::Request& request,
- MeasureTiming measure,
- executeSynchronously_cb cb) {
+hardware::Return<void> SamplePreparedModel::executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) {
auto [status, outputShapes, timing] = executeSynchronouslyBase(
convertToV1_3(request), measure, mModel, *mDriver, this, mPoolInfos, {}, {});
cb(convertToV1_0(status), std::move(outputShapes), timing);
- return Void();
+ return hardware::Void();
}
-Return<void> SamplePreparedModel::executeSynchronously_1_3(
- const V1_3::Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
+hardware::Return<void> SamplePreparedModel::executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
auto [status, outputShapes, timing] = executeSynchronouslyBase(
request, measure, mModel, *mDriver, this, mPoolInfos, deadline, loopTimeoutDuration);
cb(status, std::move(outputShapes), timing);
- return Void();
+ return hardware::Void();
}
// The sample driver will finish the execution and then return.
-Return<void> SamplePreparedModel::executeFenced(
- const hal::Request& request, const hidl_vec<hidl_handle>& waitFor, MeasureTiming measure,
- const OptionalTimePoint& halDeadline, const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration, executeFenced_cb cb) {
+hardware::Return<void> SamplePreparedModel::executeFenced(
+ const V1_3::Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& halDeadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
"SamplePreparedModel::executeFenced");
VLOG(DRIVER) << "executeFenced(" << SHOW_IF_DEBUG(toString(request)) << ")";
time_point driverStart, driverEnd, deviceStart, deviceEnd;
- if (measure == MeasureTiming::YES) driverStart = now();
+ if (measure == V1_2::MeasureTiming::YES) driverStart = now();
if (!validateRequest(request, mModel, /*allowUnspecifiedOutput=*/false)) {
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- cb(ErrorStatus::MISSED_DEADLINE_PERSISTENT, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
// Wait for the dependent events to signal
for (const auto& fenceHandle : waitFor) {
if (!fenceHandle.getNativeHandle()) {
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
int syncFenceFd = fenceHandle.getNativeHandle()->data[0];
if (syncWait(syncFenceFd, -1) != FenceState::SIGNALED) {
LOG(ERROR) << "syncWait failed";
- cb(ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
}
// Update deadline if the timeout duration is closer than the deadline.
auto closestDeadline = deadline;
- if (duration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none) {
+ if (duration.getDiscriminator() != V1_3::OptionalTimeoutDuration::hidl_discriminator::none) {
const auto timeoutDurationDeadline = makeDeadline(duration.nanoseconds());
if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) {
closestDeadline = timeoutDurationDeadline;
@@ -687,51 +711,52 @@ Return<void> SamplePreparedModel::executeFenced(
}
time_point driverStartAfterFence;
- if (measure == MeasureTiming::YES) driverStartAfterFence = now();
+ if (measure == V1_2::MeasureTiming::YES) driverStartAfterFence = now();
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS,
"SamplePreparedModel::executeFenced");
const auto [poolStatus, requestPoolInfos, bufferWrappers] =
createRunTimePoolInfos(request, *mDriver, this);
- if (poolStatus != ErrorStatus::NONE) {
- cb(poolStatus, hidl_handle(nullptr), nullptr);
- return Void();
+ if (poolStatus != V1_3::ErrorStatus::NONE) {
+ cb(poolStatus, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
"SamplePreparedModel::executeFenced");
CpuExecutor executor = mDriver->getExecutor();
if (loopTimeoutDuration.getDiscriminator() !=
- OptionalTimeoutDuration::hidl_discriminator::none) {
+ V1_3::OptionalTimeoutDuration::hidl_discriminator::none) {
executor.setLoopTimeout(loopTimeoutDuration.nanoseconds());
}
if (closestDeadline.has_value()) {
executor.setDeadline(*closestDeadline);
}
- if (measure == MeasureTiming::YES) deviceStart = now();
- int n = executor.run(mModel, request, mPoolInfos, requestPoolInfos);
- if (measure == MeasureTiming::YES) deviceEnd = now();
+ if (measure == V1_2::MeasureTiming::YES) deviceStart = now();
+ int n = executor.run(uncheckedConvert(mModel), uncheckedConvert(request), mPoolInfos,
+ requestPoolInfos);
+ if (measure == V1_2::MeasureTiming::YES) deviceEnd = now();
VLOG(DRIVER) << "executor.run returned " << n;
- ErrorStatus executionStatus = convertResultCodeToErrorStatus(n);
- if (executionStatus != ErrorStatus::NONE) {
- cb(executionStatus, hidl_handle(nullptr), nullptr);
- return Void();
+ V1_3::ErrorStatus executionStatus = convertResultCodeToHalErrorStatus(n);
+ if (executionStatus != V1_3::ErrorStatus::NONE) {
+ cb(executionStatus, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
// Set output memories to the initialized state.
- if (executionStatus == ErrorStatus::NONE) {
+ if (executionStatus == V1_3::ErrorStatus::NONE) {
for (const auto& output : request.outputs) {
const uint32_t poolIndex = output.location.poolIndex;
const auto& pool = request.pools[poolIndex];
- if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) {
+ if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) {
bufferWrappers[poolIndex]->setInitialized(true);
}
}
}
- Timing timingSinceLaunch = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
- Timing timingAfterFence = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
- if (measure == MeasureTiming::YES) {
+ V1_2::Timing timingSinceLaunch = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+ V1_2::Timing timingAfterFence = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+ if (measure == V1_2::MeasureTiming::YES) {
driverEnd = now();
timingSinceLaunch = {
.timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
@@ -744,8 +769,8 @@ Return<void> SamplePreparedModel::executeFenced(
}
sp<SampleFencedExecutionCallback> fencedExecutionCallback =
new SampleFencedExecutionCallback(timingSinceLaunch, timingAfterFence, executionStatus);
- cb(executionStatus, hidl_handle(nullptr), fencedExecutionCallback);
- return Void();
+ cb(executionStatus, hardware::hidl_handle(nullptr), fencedExecutionCallback);
+ return hardware::Void();
}
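// Hedged sketch: how a V1_3 client might drive the executeFenced() flow
// implemented above — pass fences to wait on, let the driver clamp its
// deadline to min(deadline, now + duration), then query the returned callback
// for timing. The callback signatures are taken from this file; the empty
// fence vector and the default-constructed optionals are simplifications
// (real code must set the safe_union discriminators). Not the runtime's
// actual calling code.
void runFencedOnce(const sp<V1_3::IPreparedModel>& prepared, const V1_3::Request& request) {
    hardware::hidl_vec<hardware::hidl_handle> waitFor;  // no input fences: start immediately
    V1_3::OptionalTimePoint deadline;                   // left as "none" here
    V1_3::OptionalTimeoutDuration loopTimeout, duration;
    prepared->executeFenced(
            request, waitFor, V1_2::MeasureTiming::YES, deadline, loopTimeout, duration,
            [](V1_3::ErrorStatus status, const hardware::hidl_handle& /*syncFence*/,
               const sp<V1_3::IFencedExecutionCallback>& callback) {
                if (status != V1_3::ErrorStatus::NONE || callback == nullptr) return;
                // The sample driver completes synchronously, so timing is ready now.
                callback->getExecutionInfo(
                        [](V1_3::ErrorStatus, const V1_2::Timing& sinceLaunch,
                           const V1_2::Timing& /*afterFence*/) {
                            VLOG(DRIVER) << "client saw timing = " << toString(sinceLaunch);
                        });
            });
}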
// BurstExecutorWithCache maps hidl_memory when it is first seen, and preserves
@@ -755,7 +780,7 @@ Return<void> SamplePreparedModel::executeFenced(
// unmapping the memory on each execution. (A standalone sketch of this
// slot-cache protocol follows the class members below.)
class BurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache {
public:
- BurstExecutorWithCache(const Model& model, const SampleDriver* driver,
+ BurstExecutorWithCache(const V1_3::Model& model, const SampleDriver* driver,
const std::vector<RunTimePoolInfo>& poolInfos)
: mModel(model), mDriver(driver), mModelPoolInfos(poolInfos) {}
@@ -764,20 +789,20 @@ class BurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache
return (it != mMemoryCache.end()) && it->second.has_value();
}
- void addCacheEntry(const hidl_memory& memory, int32_t slot) override {
- mMemoryCache[slot] = RunTimePoolInfo::createFromHidlMemory(memory);
+ void addCacheEntry(const hardware::hidl_memory& memory, int32_t slot) override {
+ mMemoryCache[slot] = RunTimePoolInfo::createFromMemory(uncheckedConvert(memory));
}
void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); }
- std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
+ std::tuple<V1_0::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> execute(
const V1_0::Request& request, const std::vector<int32_t>& slots,
- MeasureTiming measure) override {
+ V1_2::MeasureTiming measure) override {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
"BurstExecutorWithCache::execute");
time_point driverStart, driverEnd, deviceStart, deviceEnd;
- if (measure == MeasureTiming::YES) driverStart = now();
+ if (measure == V1_2::MeasureTiming::YES) driverStart = now();
// ensure all relevant pools are valid
if (!std::all_of(slots.begin(), slots.end(),
@@ -786,13 +811,13 @@ class BurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache
}
// finish the request object (for validation)
- hidl_vec<Request::MemoryPool> pools(slots.size());
+ hardware::hidl_vec<V1_3::Request::MemoryPool> pools(slots.size());
std::transform(slots.begin(), slots.end(), pools.begin(), [this](int32_t slot) {
- Request::MemoryPool pool;
- pool.hidlMemory(mMemoryCache[slot]->getHidlMemory());
+ V1_3::Request::MemoryPool pool;
+ pool.hidlMemory(convertToV1_0(mMemoryCache[slot]->getMemory()));
return pool;
});
- Request fullRequest = {.inputs = request.inputs, .outputs = request.outputs};
+ V1_3::Request fullRequest = {.inputs = request.inputs, .outputs = request.outputs};
fullRequest.pools = std::move(pools);
// validate request object against the model
@@ -811,15 +836,17 @@ class BurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache
// because burst does not support HAL 1.3 and hence does not support
// WHILE loops.
CpuExecutor executor = mDriver->getExecutor();
- if (measure == MeasureTiming::YES) deviceStart = now();
- int n = executor.run(mModel, fullRequest, mModelPoolInfos, requestPoolInfos);
- if (measure == MeasureTiming::YES) deviceEnd = now();
+ if (measure == V1_2::MeasureTiming::YES) deviceStart = now();
+ int n = executor.run(uncheckedConvert(mModel), uncheckedConvert(fullRequest),
+ mModelPoolInfos, requestPoolInfos);
+ if (measure == V1_2::MeasureTiming::YES) deviceEnd = now();
VLOG(DRIVER) << "executor.run returned " << n;
- V1_0::ErrorStatus executionStatus = convertToV1_0(convertResultCodeToErrorStatus(n));
- hidl_vec<OutputShape> outputShapes = executor.getOutputShapes();
- if (measure == MeasureTiming::YES && executionStatus == V1_0::ErrorStatus::NONE) {
+ V1_0::ErrorStatus executionStatus = convertToV1_0(convertResultCodeToHalErrorStatus(n));
+ hardware::hidl_vec<V1_2::OutputShape> outputShapes =
+ convertToV1_2(executor.getOutputShapes());
+ if (measure == V1_2::MeasureTiming::YES && executionStatus == V1_0::ErrorStatus::NONE) {
driverEnd = now();
- Timing timing = {
+ V1_2::Timing timing = {
.timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)),
.timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))};
VLOG(DRIVER) << "BurstExecutorWithCache::execute timing = " << toString(timing);
@@ -830,7 +857,7 @@ class BurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache
}
private:
- const Model mModel;
+ const V1_3::Model mModel;
const SampleDriver* const mDriver;
const std::vector<RunTimePoolInfo> mModelPoolInfos;
std::map<int32_t, std::optional<RunTimePoolInfo>> mMemoryCache; // cached requestPoolInfos
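// Hedged standalone sketch of the slot-cache protocol described before this
// class; Mapping stands in for RunTimePoolInfo and is hypothetical.
#include <cstdint>
#include <map>
#include <optional>
struct Mapping {};  // placeholder for a mapped hidl_memory
class SlotCache {
  public:
    bool isCacheEntryPresent(int32_t slot) const {
        const auto it = mCache.find(slot);
        // A slot may be present but empty if mapping failed, matching the
        // has_value() check in BurstExecutorWithCache above.
        return it != mCache.end() && it->second.has_value();
    }
    void addCacheEntry(Mapping m, int32_t slot) { mCache[slot] = std::move(m); }
    void removeCacheEntry(int32_t slot) { mCache.erase(slot); }  // drops the mapping
  private:
    std::map<int32_t, std::optional<Mapping>> mCache;
};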
@@ -852,7 +879,7 @@ static std::chrono::microseconds getPollingTimeWindow() {
#endif // NN_DEBUGGABLE
}
-Return<void> SamplePreparedModel::configureExecutionBurst(
+hardware::Return<void> SamplePreparedModel::configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -860,7 +887,7 @@ Return<void> SamplePreparedModel::configureExecutionBurst(
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION,
"SampleDriver::configureExecutionBurst");
- const bool preferPowerOverLatency = (kPreference == ExecutionPreference::LOW_POWER);
+ const bool preferPowerOverLatency = (kPreference == V1_1::ExecutionPreference::LOW_POWER);
const auto pollingTimeWindow =
(preferPowerOverLatency ? std::chrono::microseconds{0} : getPollingTimeWindow());
@@ -883,7 +910,7 @@ Return<void> SamplePreparedModel::configureExecutionBurst(
cb(V1_0::ErrorStatus::NONE, burst);
}
- return Void();
+ return hardware::Void();
}
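// Hedged sketch of the preference-to-polling-window decision made in
// configureExecutionBurst() above; the 50 us window is illustrative only and
// is not the value produced by getPollingTimeWindow().
std::chrono::microseconds pollingWindowFor(V1_1::ExecutionPreference preference) {
    const bool preferPowerOverLatency = (preference == V1_1::ExecutionPreference::LOW_POWER);
    // 0 us: block on the fast message queue immediately (lowest power).
    // Non-zero: busy-poll briefly before sleeping (lower request latency).
    return preferPowerOverLatency ? std::chrono::microseconds{0}
                                  : std::chrono::microseconds{50};
}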
} // namespace sample_driver
diff --git a/nn/driver/sample/SampleDriver.h b/nn/driver/sample/SampleDriver.h
index 3628ee6c6..2482a9f4c 100644
--- a/nn/driver/sample/SampleDriver.h
+++ b/nn/driver/sample/SampleDriver.h
@@ -36,16 +36,17 @@ namespace sample_driver {
using hardware::MQDescriptorSync;
// Manages the data buffer for an operand.
-class SampleBuffer : public hal::IBuffer {
+class SampleBuffer : public V1_3::IBuffer {
public:
SampleBuffer(std::shared_ptr<ManagedBuffer> buffer, std::unique_ptr<BufferTracker::Token> token)
: kBuffer(std::move(buffer)), kToken(std::move(token)) {
CHECK(kBuffer != nullptr);
CHECK(kToken != nullptr);
}
- hal::Return<hal::ErrorStatus> copyTo(const hal::hidl_memory& dst) override;
- hal::Return<hal::ErrorStatus> copyFrom(const hal::hidl_memory& src,
- const hal::hidl_vec<uint32_t>& dimensions) override;
+ hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory& dst) override;
+ hardware::Return<V1_3::ErrorStatus> copyFrom(
+ const hardware::hidl_memory& src,
+ const hardware::hidl_vec<uint32_t>& dimensions) override;
private:
const std::shared_ptr<ManagedBuffer> kBuffer;
@@ -57,7 +58,7 @@ class SampleBuffer : public hal::IBuffer {
//
// Since these drivers simulate hardware, they must run the computations
// on the CPU. An actual driver would not do that.
-class SampleDriver : public hal::IDevice {
+class SampleDriver : public V1_3::IDevice {
public:
SampleDriver(const char* name,
const IOperationResolver* operationResolver = BuiltinOperationResolver::get())
@@ -66,51 +67,50 @@ class SampleDriver : public hal::IDevice {
mBufferTracker(BufferTracker::create()) {
android::nn::initVLogMask();
}
- hal::Return<void> getCapabilities(getCapabilities_cb cb) override;
- hal::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb cb) override;
- hal::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- hal::Return<void> getVersionString(getVersionString_cb cb) override;
- hal::Return<void> getType(getType_cb cb) override;
- hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb) override;
- hal::Return<void> getSupportedOperations(const hal::V1_0::Model& model,
- getSupportedOperations_cb cb) override;
- hal::Return<void> getSupportedOperations_1_1(const hal::V1_1::Model& model,
- getSupportedOperations_1_1_cb cb) override;
- hal::Return<void> getSupportedOperations_1_2(const hal::V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
- hal::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override;
- hal::Return<hal::V1_0::ErrorStatus> prepareModel(
- const hal::V1_0::Model& model,
- const sp<hal::V1_0::IPreparedModelCallback>& callback) override;
- hal::Return<hal::V1_0::ErrorStatus> prepareModel_1_1(
- const hal::V1_1::Model& model, hal::ExecutionPreference preference,
- const sp<hal::V1_0::IPreparedModelCallback>& callback) override;
- hal::Return<hal::V1_0::ErrorStatus> prepareModel_1_2(
- const hal::V1_2::Model& model, hal::ExecutionPreference preference,
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
- const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
- hal::Return<hal::V1_3::ErrorStatus> prepareModel_1_3(
- const hal::V1_3::Model& model, hal::ExecutionPreference preference,
- hal::Priority priority, const hal::OptionalTimePoint& deadline,
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
- const sp<hal::V1_3::IPreparedModelCallback>& callback) override;
- hal::Return<hal::V1_0::ErrorStatus> prepareModelFromCache(
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
- const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
- hal::Return<hal::V1_3::ErrorStatus> prepareModelFromCache_1_3(
- const hal::OptionalTimePoint& deadline,
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
- const sp<hal::V1_3::IPreparedModelCallback>& callback) override;
- hal::Return<hal::DeviceStatus> getStatus() override;
- hal::Return<void> allocate(const hal::V1_3::BufferDesc& desc,
- const hal::hidl_vec<sp<hal::V1_3::IPreparedModel>>& preparedModels,
- const hal::hidl_vec<hal::V1_3::BufferRole>& inputRoles,
- const hal::hidl_vec<hal::V1_3::BufferRole>& outputRoles,
- allocate_cb cb) override;
+ hardware::Return<void> getCapabilities(getCapabilities_cb cb) override;
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb cb) override;
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
+ hardware::Return<void> getVersionString(getVersionString_cb cb) override;
+ hardware::Return<void> getType(getType_cb cb) override;
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb) override;
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb cb) override;
+ hardware::Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb cb) override;
+ hardware::Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) override;
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override;
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
+ const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_0::DeviceStatus> getStatus() override;
+ hardware::Return<void> allocate(
+ const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) override;
// Starts and runs the driver service. Typically called from main().
// This will return only once the service shuts down.
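// Hedged sketch of a service entry point built on the run() contract above;
// MyDriver is a hypothetical concrete subclass (compare the drivers in
// SampleDriverFloatFast.cpp below), and run() is assumed to return an int
// exit status.
int main() {
    android::sp<android::nn::sample_driver::MyDriver> driver =
            new android::nn::sample_driver::MyDriver();
    return driver->run();  // registers the HIDL service; returns only on shutdown
}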
@@ -125,10 +125,10 @@ class SampleDriver : public hal::IDevice {
const std::shared_ptr<BufferTracker> mBufferTracker;
};
-class SamplePreparedModel : public hal::IPreparedModel {
+class SamplePreparedModel : public V1_3::IPreparedModel {
public:
- SamplePreparedModel(const hal::Model& model, const SampleDriver* driver,
- hal::ExecutionPreference preference, uid_t userId, hal::Priority priority)
+ SamplePreparedModel(const V1_3::Model& model, const SampleDriver* driver,
+ V1_1::ExecutionPreference preference, uid_t userId, V1_3::Priority priority)
: mModel(model),
mDriver(driver),
kPreference(preference),
@@ -138,64 +138,63 @@ class SamplePreparedModel : public hal::IPreparedModel {
(void)kPriority;
}
bool initialize();
- hal::Return<hal::V1_0::ErrorStatus> execute(
- const hal::V1_0::Request& request,
- const sp<hal::V1_0::IExecutionCallback>& callback) override;
- hal::Return<hal::V1_0::ErrorStatus> execute_1_2(
- const hal::V1_0::Request& request, hal::MeasureTiming measure,
- const sp<hal::V1_2::IExecutionCallback>& callback) override;
- hal::Return<hal::V1_3::ErrorStatus> execute_1_3(
- const hal::V1_3::Request& request, hal::MeasureTiming measure,
- const hal::OptionalTimePoint& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<hal::V1_3::IExecutionCallback>& callback) override;
- hal::Return<void> executeSynchronously(const hal::V1_0::Request& request,
- hal::MeasureTiming measure,
- executeSynchronously_cb cb) override;
- hal::Return<void> executeSynchronously_1_3(
- const hal::V1_3::Request& request, hal::MeasureTiming measure,
- const hal::OptionalTimePoint& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override;
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override;
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) override;
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override;
+ hardware::Return<void> executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
executeSynchronously_1_3_cb cb) override;
- hal::Return<void> configureExecutionBurst(
- const sp<hal::V1_2::IBurstCallback>& callback,
- const MQDescriptorSync<hal::V1_2::FmqRequestDatum>& requestChannel,
- const MQDescriptorSync<hal::V1_2::FmqResultDatum>& resultChannel,
+ hardware::Return<void> configureExecutionBurst(
+ const sp<V1_2::IBurstCallback>& callback,
+ const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+ const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) override;
- hal::Return<void> executeFenced(const hal::Request& request,
- const hal::hidl_vec<hal::hidl_handle>& wait_for,
- hal::MeasureTiming measure,
- const hal::OptionalTimePoint& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& duration,
- executeFenced_cb callback) override;
- const hal::Model* getModel() const { return &mModel; }
+ hardware::Return<void> executeFenced(const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& wait_for,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration,
+ executeFenced_cb callback) override;
+ const V1_3::Model* getModel() const { return &mModel; }
protected:
- hal::Model mModel;
+ V1_3::Model mModel;
const SampleDriver* mDriver;
std::vector<RunTimePoolInfo> mPoolInfos;
- const hal::ExecutionPreference kPreference;
+ const V1_1::ExecutionPreference kPreference;
const uid_t kUserId;
- const hal::Priority kPriority;
+ const V1_3::Priority kPriority;
};
-class SampleFencedExecutionCallback : public hal::IFencedExecutionCallback {
+class SampleFencedExecutionCallback : public V1_3::IFencedExecutionCallback {
public:
- SampleFencedExecutionCallback(hal::Timing timingSinceLaunch, hal::Timing timingAfterFence,
- hal::ErrorStatus error)
+ SampleFencedExecutionCallback(V1_2::Timing timingSinceLaunch, V1_2::Timing timingAfterFence,
+ V1_3::ErrorStatus error)
: kTimingSinceLaunch(timingSinceLaunch),
kTimingAfterFence(timingAfterFence),
kErrorStatus(error) {}
- hal::Return<void> getExecutionInfo(getExecutionInfo_cb callback) override {
+ hardware::Return<void> getExecutionInfo(getExecutionInfo_cb callback) override {
callback(kErrorStatus, kTimingSinceLaunch, kTimingAfterFence);
- return hal::Void();
+ return hardware::Void();
}
private:
- const hal::Timing kTimingSinceLaunch;
- const hal::Timing kTimingAfterFence;
- const hal::ErrorStatus kErrorStatus;
+ const V1_2::Timing kTimingSinceLaunch;
+ const V1_2::Timing kTimingAfterFence;
+ const V1_3::ErrorStatus kErrorStatus;
};
} // namespace sample_driver
diff --git a/nn/driver/sample/SampleDriverFloatFast.cpp b/nn/driver/sample/SampleDriverFloatFast.cpp
index 5d2cd1344..0ee467025 100644
--- a/nn/driver/sample/SampleDriverFloatFast.cpp
+++ b/nn/driver/sample/SampleDriverFloatFast.cpp
@@ -31,34 +31,32 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
class SampleDriverFloatFast : public SampleDriverPartial {
public:
SampleDriverFloatFast() : SampleDriverPartial("nnapi-sample_float_fast") {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
private:
std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override;
};
-Return<void> SampleDriverFloatFast::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> SampleDriverFloatFast::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- Capabilities capabilities = {
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.7f, .powerUsage = 1.1f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.7f, .powerUsage = 1.1f},
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}),
.ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f},
.whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}};
- update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
{.execTime = 0.8f, .powerUsage = 1.2f});
- update(&capabilities.operandPerformance, OperandType::FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32,
{.execTime = 0.8f, .powerUsage = 1.2f});
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
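// For context on the numbers above: PerformanceInfo values are ratios
// relative to a reference CPU implementation, so execTime below 1.0
// advertises faster-than-CPU execution and powerUsage above 1.0 higher power
// draw. A toy illustration, not the runtime's device-selection logic:
struct PerfInfoToy { float execTime; float powerUsage; };  // 1.0 == reference CPU
const char* describeToy(PerfInfoToy p) {
    return p.execTime < 1.0f ? "faster than the CPU path" : "not faster than the CPU path";
}
// describeToy({0.8f, 1.2f}) — the TENSOR_FLOAT32 entry advertised above —
// yields "faster than the CPU path", at 20% higher power.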
std::vector<bool> SampleDriverFloatFast::getSupportedOperationsImpl(
@@ -66,10 +64,10 @@ std::vector<bool> SampleDriverFloatFast::getSupportedOperationsImpl(
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) {
- const Operand& firstOperand = model.main.operands[operation.inputs[0]];
- supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32;
+ const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]];
+ supported[i] = firstOperand.type == V1_3::OperandType::TENSOR_FLOAT32;
}
}
return supported;
diff --git a/nn/driver/sample/SampleDriverFloatSlow.cpp b/nn/driver/sample/SampleDriverFloatSlow.cpp
index 1e6f0cb0d..009cd5af6 100644
--- a/nn/driver/sample/SampleDriverFloatSlow.cpp
+++ b/nn/driver/sample/SampleDriverFloatSlow.cpp
@@ -31,34 +31,32 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
class SampleDriverFloatSlow : public SampleDriverPartial {
public:
SampleDriverFloatSlow() : SampleDriverPartial("nnapi-sample_float_slow") {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
private:
std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override;
};
-Return<void> SampleDriverFloatSlow::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> SampleDriverFloatSlow::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- Capabilities capabilities = {
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.2f, .powerUsage = 0.6f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.2f, .powerUsage = 0.6f},
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}),
.ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f},
.whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}};
- update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
{.execTime = 1.3f, .powerUsage = 0.7f});
- update(&capabilities.operandPerformance, OperandType::FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32,
{.execTime = 1.3f, .powerUsage = 0.7f});
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
std::vector<bool> SampleDriverFloatSlow::getSupportedOperationsImpl(
@@ -66,10 +64,10 @@ std::vector<bool> SampleDriverFloatSlow::getSupportedOperationsImpl(
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) {
- const Operand& firstOperand = model.main.operands[operation.inputs[0]];
- supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32;
+ const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]];
+ supported[i] = firstOperand.type == V1_3::OperandType::TENSOR_FLOAT32;
}
}
return supported;
diff --git a/nn/driver/sample/SampleDriverFloatXNNPACK.cpp b/nn/driver/sample/SampleDriverFloatXNNPACK.cpp
index 3a8c0a2df..db24b59c0 100644
--- a/nn/driver/sample/SampleDriverFloatXNNPACK.cpp
+++ b/nn/driver/sample/SampleDriverFloatXNNPACK.cpp
@@ -43,20 +43,18 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
namespace {
-#define NN_DRIVER_RETURN_IF_ERROR(expr) \
- do { \
- ErrorStatus _errorCode = (expr); \
- if (_errorCode != ErrorStatus::NONE) { \
- return _errorCode; \
- } \
+#define NN_DRIVER_RETURN_IF_ERROR(expr) \
+ do { \
+ V1_3::ErrorStatus _errorCode = (expr); \
+ if (_errorCode != V1_3::ErrorStatus::NONE) { \
+ return _errorCode; \
+ } \
} while (0)
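// Representative expansion site for the macro, matching its use in the
// Visit*Node helpers below: every Check* helper returns V1_3::ErrorStatus,
// and any status other than NONE is propagated to the caller immediately.
// The second call simply matches the CheckNumInputsAndOutputs signature
// declared later in this file.
//     NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
//     NN_DRIVER_RETURN_IF_ERROR(CheckNumInputsAndOutputs(operation, 1, 1));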
const size_t kNumOfWorkerThreads = 1;
-static const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+static const V1_2::Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
bool isScalarType(OperandType type) {
switch (type) {
@@ -72,13 +70,13 @@ bool isScalarType(OperandType type) {
}
void updateForArguments(const std::vector<uint32_t>& indexes,
- const hidl_vec<RequestArgument>& arguments,
+ const hardware::hidl_vec<V1_0::RequestArgument>& arguments,
const std::vector<RunTimePoolInfo>& requestPoolInfos,
RunTimeOperandInfo* operands) {
CHECK_EQ(indexes.size(), arguments.size());
for (size_t i = 0; i < indexes.size(); i++) {
const uint32_t operandIndex = indexes[i];
- const RequestArgument& from = arguments[i];
+ const V1_0::RequestArgument& from = arguments[i];
RunTimeOperandInfo& to = operands[operandIndex];
if (from.dimensions.size() > 0) {
// It's the responsibility of the caller to validate that
@@ -89,7 +87,7 @@ void updateForArguments(const std::vector<uint32_t>& indexes,
to.dimensions = from.dimensions;
}
if (from.hasNoValue) {
- to.lifetime = OperandLifeTime::NO_VALUE;
+ to.lifetime = Operand::LifeTime::NO_VALUE;
CHECK(to.buffer == nullptr);
to.length = 0;
} else {
@@ -108,30 +106,30 @@ void updateForArguments(const std::vector<uint32_t>& indexes,
}
std::vector<RunTimeOperandInfo> initializeRunTimeInfo(
- const Subgraph& subgraph, const std::vector<RunTimePoolInfo>& modelPoolInfos,
- const hidl_vec<uint8_t>* mModelOperandValues) {
+ const V1_3::Subgraph& subgraph, const std::vector<RunTimePoolInfo>& modelPoolInfos,
+ const hardware::hidl_vec<uint8_t>* mModelOperandValues) {
const size_t count = subgraph.operands.size();
std::vector<RunTimeOperandInfo> operands(count);
for (size_t i = 0; i < count; i++) {
- const Operand& from = subgraph.operands[i];
+ const V1_3::Operand& from = subgraph.operands[i];
RunTimeOperandInfo& to = operands[i];
- to.type = from.type;
+ to.type = uncheckedConvert(from.type);
to.dimensions = from.dimensions;
to.scale = from.scale;
to.zeroPoint = from.zeroPoint;
to.length = from.location.length;
- to.lifetime = from.lifetime;
- to.extraParams = from.extraParams;
+ to.lifetime = uncheckedConvert(from.lifetime);
+ to.extraParams = uncheckedConvert(from.extraParams);
switch (from.lifetime) {
- case OperandLifeTime::TEMPORARY_VARIABLE:
+ case V1_3::OperandLifeTime::TEMPORARY_VARIABLE:
to.buffer = nullptr;
to.numberOfUsesLeft = from.numberOfConsumers;
break;
- case OperandLifeTime::CONSTANT_COPY:
+ case V1_3::OperandLifeTime::CONSTANT_COPY:
to.buffer = const_cast<uint8_t*>(&(*mModelOperandValues)[from.location.offset]);
to.numberOfUsesLeft = 0;
break;
- case OperandLifeTime::CONSTANT_REFERENCE: {
+ case V1_3::OperandLifeTime::CONSTANT_REFERENCE: {
auto poolIndex = from.location.poolIndex;
CHECK_LT(poolIndex, modelPoolInfos.size());
auto& r = modelPoolInfos[poolIndex];
@@ -139,10 +137,10 @@ std::vector<RunTimeOperandInfo> initializeRunTimeInfo(
to.numberOfUsesLeft = 0;
break;
}
- case OperandLifeTime::SUBGRAPH:
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
- case OperandLifeTime::NO_VALUE:
+ case V1_3::OperandLifeTime::SUBGRAPH:
+ case V1_3::OperandLifeTime::SUBGRAPH_INPUT:
+ case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT:
+ case V1_3::OperandLifeTime::NO_VALUE:
to.buffer = nullptr;
to.numberOfUsesLeft = 0;
break;
@@ -155,7 +153,7 @@ std::vector<RunTimeOperandInfo> initializeRunTimeInfo(
class Subgraph {
public:
- static Subgraph* Create(const hidl_vec<Operation>& operations,
+ static Subgraph* Create(const hardware::hidl_vec<V1_3::Operation>& operations,
std::vector<RunTimeOperandInfo>& operands,
const std::vector<uint32_t>& inputIndexes,
const std::vector<uint32_t>& outputIndexes, pthreadpool_t threadpool,
@@ -182,13 +180,13 @@ class Subgraph {
std::vector<int> tensors(operands.size(), -1);
for (const auto& operation : operations) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ const std::vector<uint32_t>& ins = operation.inputs;
+ const std::vector<uint32_t>& outs = operation.outputs;
switch (operation.type) {
- case OperationType::MEAN:
- case OperationType::PAD:
- case OperationType::RESHAPE:
- case OperationType::RESIZE_BILINEAR:
+ case V1_3::OperationType::MEAN:
+ case V1_3::OperationType::PAD:
+ case V1_3::OperationType::RESHAPE:
+ case V1_3::OperationType::RESIZE_BILINEAR:
// Ignore the second input (axes, static padding, or new shape),
// because it is represented as parameters of the XNNPACK operator
// rather than extra input.
@@ -223,8 +221,9 @@ class Subgraph {
uint32_t flags = 0;
const void* data = nullptr;
- if (operands[tensors[t]].lifetime == OperandLifeTime::CONSTANT_COPY ||
- operands[tensors[t]].lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ if (operands[tensors[t]].lifetime == Operand::LifeTime::CONSTANT_COPY ||
+ operands[tensors[t]].lifetime == Operand::LifeTime::CONSTANT_REFERENCE ||
+ operands[tensors[t]].lifetime == Operand::LifeTime::POINTER) {
data = operands[tensors[t]].buffer;
}
if (inputs.count(t) != 0) {
@@ -254,7 +253,7 @@ class Subgraph {
// Create XNNPACK nodes for NNAPI Operations
for (const auto& operation : operations) {
if (VisitNode(subgraph.get(), operation, operands.data(), xnnpackTensors) !=
- ErrorStatus::NONE) {
+ V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "XNNPACK add op failed";
return nullptr;
}
@@ -269,9 +268,9 @@ class Subgraph {
return new Subgraph(runtimePtr, std::move(externals), useStaticBuffer);
}
- ErrorStatus Prepare() { return ErrorStatus::NONE; }
+ V1_3::ErrorStatus Prepare() { return V1_3::ErrorStatus::NONE; }
- ErrorStatus Invoke(RunTimeOperandInfo* operands) {
+ V1_3::ErrorStatus Invoke(RunTimeOperandInfo* operands) {
VLOG(DRIVER) << "Subgraph::Invoke() start";
if (!mUseStaticBuffer || mFirstRun) {
VLOG(DRIVER) << "Setup buffer for Subgraph";
@@ -288,7 +287,7 @@ class Subgraph {
xnn_setup_runtime(mRuntime.get(), externalValues.size(), externalValues.data());
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_setup_runtime FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
mFirstRun = false;
}
@@ -296,273 +295,273 @@ class Subgraph {
const xnn_status status = xnn_invoke_runtime(mRuntime.get());
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_invoke_runtime FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CalculatePadding(int padding, uint32_t* flags) {
+ static V1_3::ErrorStatus CalculatePadding(int padding, uint32_t* flags) {
switch (padding) {
case ANEURALNETWORKS_PADDING_SAME:
*flags = XNN_FLAG_TENSORFLOW_SAME_PADDING;
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
case ANEURALNETWORKS_PADDING_VALID:
*flags = 0;
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
default:
LOG(ERROR) << "invalid padding mode";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
- static ErrorStatus ConvertActivationToOutputRange(int activation, float* outputMin,
- float* outputMax) {
+ static V1_3::ErrorStatus ConvertActivationToOutputRange(int activation, float* outputMin,
+ float* outputMax) {
switch (activation) {
case ANEURALNETWORKS_FUSED_NONE:
*outputMin = -std::numeric_limits<float>::infinity();
*outputMax = +std::numeric_limits<float>::infinity();
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
case ANEURALNETWORKS_FUSED_RELU:
*outputMin = 0.0f;
*outputMax = +std::numeric_limits<float>::infinity();
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
case ANEURALNETWORKS_FUSED_RELU1:
*outputMin = -1.0f;
*outputMax = +1.0f;
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
case ANEURALNETWORKS_FUSED_RELU6:
*outputMin = 0.0f;
*outputMax = 6.0f;
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
default:
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
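// Hedged usage sketch: the visitors below obtain the clamp range first and
// then pass it to the corresponding XNNPACK node definition as its
// output_min/output_max parameters (variable names here are illustrative).
//     float outputMin, outputMax;
//     NN_DRIVER_RETURN_IF_ERROR(
//             ConvertActivationToOutputRange(activation, &outputMin, &outputMax));
//     // ... e.g. xnn_define_add2(subgraph, outputMin, outputMax, ...) ...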
- static ErrorStatus CheckConvolutionParams(int32_t stride_width, int32_t stride_height,
- int32_t dilation_width_factor,
- int32_t dilation_height_factor) {
+ static V1_3::ErrorStatus CheckConvolutionParams(int32_t stride_width, int32_t stride_height,
+ int32_t dilation_width_factor,
+ int32_t dilation_height_factor) {
if (stride_width <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (stride_height <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (dilation_width_factor <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (dilation_height_factor <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckDepthwiseConvolutionParams(int32_t stride_width, int32_t stride_height,
- int32_t dilation_width_factor,
- int32_t dilation_height_factor,
- int32_t depth_multiplier,
- uint32_t output_channels) {
+ static V1_3::ErrorStatus CheckDepthwiseConvolutionParams(
+ int32_t stride_width, int32_t stride_height, int32_t dilation_width_factor,
+ int32_t dilation_height_factor, int32_t depth_multiplier, uint32_t output_channels) {
if (stride_width <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (stride_height <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (depth_multiplier <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (output_channels % depth_multiplier != 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (dilation_width_factor <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (dilation_height_factor <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckPoolingParams(int32_t stride_width, int32_t stride_height,
- int32_t filter_width, int32_t filter_height) {
+ static V1_3::ErrorStatus CheckPoolingParams(int32_t stride_width, int32_t stride_height,
+ int32_t filter_width, int32_t filter_height) {
if (stride_width <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (stride_height <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (filter_width <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (filter_height <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (filter_width == 1 && filter_height == 1 && std::max(stride_width, stride_height) > 1) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckNumInputsAndOutputs(const Operation& operation,
- uint32_t expected_num_inputs,
- uint32_t expected_num_outputs) {
+ static V1_3::ErrorStatus CheckNumInputsAndOutputs(const V1_3::Operation& operation,
+ uint32_t expected_num_inputs,
+ uint32_t expected_num_outputs) {
if (operation.inputs.size() != expected_num_inputs) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (operation.outputs.size() != expected_num_outputs) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckTensorType(OperandType tensor_type, OperandType expected_type) {
+ static V1_3::ErrorStatus CheckTensorType(OperandType tensor_type, OperandType expected_type) {
if (tensor_type != expected_type) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckTensorFloatType(OperandType tensor_type) {
+ static V1_3::ErrorStatus CheckTensorFloatType(OperandType tensor_type) {
if (tensor_type != OperandType::TENSOR_FLOAT32) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckTensorShape(std::vector<uint32_t>& dimensions, uint32_t min_num_dims,
- uint32_t max_num_dims) {
+ static V1_3::ErrorStatus CheckTensorShape(std::vector<uint32_t>& dimensions,
+ uint32_t min_num_dims, uint32_t max_num_dims) {
if (min_num_dims == max_num_dims) {
if (dimensions.size() != min_num_dims) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
} else {
if (dimensions.size() < min_num_dims || dimensions.size() > max_num_dims) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
for (size_t i = 0; i < dimensions.size(); i++) {
if (dimensions[i] <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckTensorShape(std::vector<uint32_t>& dimensions, int expected_num_dims) {
+ static V1_3::ErrorStatus CheckTensorShape(std::vector<uint32_t>& dimensions,
+ int expected_num_dims) {
return CheckTensorShape(dimensions, expected_num_dims, expected_num_dims);
}
- static ErrorStatus CheckSlopeTensorShape(std::vector<uint32_t>& dimensions) {
+ static V1_3::ErrorStatus CheckSlopeTensorShape(std::vector<uint32_t>& dimensions) {
if (dimensions.size() < 1) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
// Validate that all non-channel dimensions (if any) are exactly 1.
for (size_t i = 0; i < dimensions.size() - 1; i++) {
if (dimensions[i] != 1) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckAxesTensorShape(std::vector<uint32_t>& dimensions) {
+ static V1_3::ErrorStatus CheckAxesTensorShape(std::vector<uint32_t>& dimensions) {
if (dimensions.size() != 1) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckShapeTensorShape(std::vector<uint32_t>& dimensions) {
+ static V1_3::ErrorStatus CheckShapeTensorShape(std::vector<uint32_t>& dimensions) {
if (dimensions.size() != 1) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus CheckTensorStaticAllocation(OperandLifeTime lifetime) {
- if (lifetime != OperandLifeTime::CONSTANT_COPY &&
- lifetime != OperandLifeTime::CONSTANT_REFERENCE) {
- VLOG(DRIVER) << "CheckTensorStaticAllocation: " << toString(lifetime);
- return ErrorStatus::INVALID_ARGUMENT;
+ static V1_3::ErrorStatus CheckTensorStaticAllocation(Operand::LifeTime lifetime) {
+ if (lifetime != Operand::LifeTime::CONSTANT_COPY &&
+ lifetime != Operand::LifeTime::CONSTANT_REFERENCE &&
+ lifetime != Operand::LifeTime::POINTER) {
+ VLOG(DRIVER) << "CheckTensorStaticAllocation: " << toString(convertToV1_3(lifetime));
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
+ static V1_3::ErrorStatus VisitNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
switch (operation.type) {
- case OperationType::ABS:
+ case V1_3::OperationType::ABS:
return VisitAbsNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::ADD:
+ case V1_3::OperationType::ADD:
return VisitAddNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::AVERAGE_POOL_2D:
+ case V1_3::OperationType::AVERAGE_POOL_2D:
return VisitAveragePool2DNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::CONV_2D:
+ case V1_3::OperationType::CONV_2D:
return VisitConv2DNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::DEPTHWISE_CONV_2D:
+ case V1_3::OperationType::DEPTHWISE_CONV_2D:
return VisitDepthwiseConv2DNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::DIV:
+ case V1_3::OperationType::DIV:
return VisitDivNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::FLOOR:
+ case V1_3::OperationType::FLOOR:
return VisitFloorNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::FULLY_CONNECTED:
+ case V1_3::OperationType::FULLY_CONNECTED:
return VisitFullyConnectedNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::HARD_SWISH:
+ case V1_3::OperationType::HARD_SWISH:
return VisitHardSwishNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::LOGISTIC:
+ case V1_3::OperationType::LOGISTIC:
return VisitLogisticNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::MAX_POOL_2D:
+ case V1_3::OperationType::MAX_POOL_2D:
return VisitMaxPool2DNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::MAXIMUM:
+ case V1_3::OperationType::MAXIMUM:
return VisitMaximumNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::MEAN:
+ case V1_3::OperationType::MEAN:
return VisitMeanNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::MINIMUM:
+ case V1_3::OperationType::MINIMUM:
return VisitMinimumNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::MUL:
+ case V1_3::OperationType::MUL:
return VisitMulNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::NEG:
+ case V1_3::OperationType::NEG:
return VisitNegNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::PAD:
+ case V1_3::OperationType::PAD:
return VisitPadNode(subgraph, operation, operands, 0.0f, xnnpackTensors);
- case OperationType::PAD_V2:
+ case V1_3::OperationType::PAD_V2:
return VisitPadV2Node(subgraph, operation, operands, xnnpackTensors);
- case OperationType::RESHAPE:
+ case V1_3::OperationType::RESHAPE:
return VisitReshapeNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::RESIZE_BILINEAR:
+ case V1_3::OperationType::RESIZE_BILINEAR:
return VisitResizeBilinearNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::PRELU:
+ case V1_3::OperationType::PRELU:
return VisitPreluNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::RELU:
+ case V1_3::OperationType::RELU:
return VisitReluNode(subgraph, operation, operands, 0.0f,
std::numeric_limits<float>::infinity(), xnnpackTensors);
- case OperationType::RELU1:
+ case V1_3::OperationType::RELU1:
return VisitReluNode(subgraph, operation, operands, -1.0f, 1.0f, xnnpackTensors);
- case OperationType::RELU6:
+ case V1_3::OperationType::RELU6:
return VisitReluNode(subgraph, operation, operands, 0.0f, 6.0f, xnnpackTensors);
- case OperationType::SQRT:
+ case V1_3::OperationType::SQRT:
return VisitSqrtNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::SUB:
+ case V1_3::OperationType::SUB:
return VisitSubNode(subgraph, operation, operands, xnnpackTensors);
- case OperationType::SOFTMAX:
+ case V1_3::OperationType::SOFTMAX:
return VisitSoftmaxNode(subgraph, operation, operands, xnnpackTensors);
default:
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
- static ErrorStatus VisitAbsNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitAbsNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -572,17 +571,17 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_abs FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitAddNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitAddNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
@@ -602,17 +601,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_add2 FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitAveragePool2DNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitAveragePool2DNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
// Make sure all scalar params are constant.
@@ -629,7 +629,7 @@ class Subgraph {
}
if (use_nchw) {
VLOG(DRIVER) << "XNNPACK VisitAveragePool2DNode FAILED: only NHWC layout is supported";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
int32_t stride_width, stride_height, filter_width, filter_height, activation;
@@ -684,17 +684,18 @@ class Subgraph {
}
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_average_pooling_2d FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitConv2DNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitConv2DNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime));
@@ -715,7 +716,7 @@ class Subgraph {
}
if (use_nchw) {
VLOG(DRIVER) << "XNNPACK VisitConv2DNode FAILED: only NHWC layout is supported";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
int32_t stride_width, stride_height, activation;
@@ -781,18 +782,19 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], flags);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_convolution_2d FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitDepthwiseConv2DNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitDepthwiseConv2DNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime));
@@ -814,7 +816,7 @@ class Subgraph {
if (use_nchw) {
VLOG(DRIVER)
<< "XNNPACK VisitDepthwiseConv2DNode FAILED: only NHWC layout is supported";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
int32_t stride_width, stride_height, depth_multiplier, activation;
@@ -882,17 +884,17 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], flags);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_depthwise_convolution_2d FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitDivNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitDivNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
@@ -912,17 +914,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_divide FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitFullyConnectedNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitFullyConnectedNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime));
@@ -947,17 +950,18 @@ class Subgraph {
/*flags=*/XNN_FLAG_TENSORFLOW_RESHAPE_2D);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_fully_connected FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitFloorNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitFloorNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -968,17 +972,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_floor FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitHardSwishNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitHardSwishNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -988,17 +993,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_hardswish FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitLogisticNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitLogisticNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -1008,17 +1014,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_sigmoid FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitMaxPool2DNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitMaxPool2DNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
// Make sure all scalar params are constant.
@@ -1035,7 +1042,7 @@ class Subgraph {
}
if (use_nchw) {
VLOG(DRIVER) << "XNNPACK VisitMaxPool2DNode FAILED: only NHWC layout is supported";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
int32_t stride_width, stride_height, filter_width, filter_height, activation;
@@ -1091,17 +1098,18 @@ class Subgraph {
}
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_max_pooling_2d FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
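
The pooling and convolution visitors all read the data-layout flag the same way: NNAPI passes optional arguments as extra trailing entries in operation.inputs, so the helper first checks ins.size() and falls back to the documented default (NHWC) when the flag is absent. A hedged sketch of that read-with-default pattern, with a plain struct standing in for RunTimeOperandInfo:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified stand-in for RunTimeOperandInfo: just a scalar payload.
    struct Operand { bool boolValue; };

    // Optional trailing inputs are present only if the inputs vector is long
    // enough; otherwise the documented default applies.
    bool ReadOptionalLayoutFlag(const std::vector<uint32_t>& ins,
                                const std::vector<Operand>& operands,
                                size_t index, bool defaultValue = false) {
        if (ins.size() <= index) return defaultValue;  // argument omitted
        return operands[ins[index]].boolValue;
    }

For MAX_POOL_2D's explicit-padding form the layout flag would sit at input index 10, so a call site would read ReadOptionalLayoutFlag(ins, operands, 10) and reject the op when it comes back true.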
- static ErrorStatus VisitMaximumNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitMaximumNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
@@ -1121,17 +1129,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_maximum2 FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitMeanNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitMeanNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorShape(operands[ins[0]].dimensions, 4));
NN_DRIVER_RETURN_IF_ERROR(CheckAxesTensorShape(operands[ins[1]].dimensions));
@@ -1143,17 +1152,17 @@ class Subgraph {
int keep_dims = getScalarData<int32_t>(operands[ins[2]]);
if (keep_dims <= 0) {
LOG(ERROR) << "XNNPACK VisitMeanNode FAILED: only support keep_dims";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
const int32_t* axes_buffer = reinterpret_cast<const int32_t*>(operands[ins[1]].buffer);
if (operands[ins[1]].dimensions[0] != 2) {
LOG(ERROR) << "XNNPACK VisitMeanNode FAILED: unsupported axes";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (std::min(axes_buffer[0], axes_buffer[1]) != 1 ||
std::max(axes_buffer[0], axes_buffer[1]) != 2) {
LOG(ERROR) << "XNNPACK VisitMeanNode FAILED: unsupported axes";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (subgraph != nullptr) {
const xnn_status status = xnn_define_global_average_pooling_2d(
@@ -1164,17 +1173,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_global_average_pooling_2d FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
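
The axes restriction above is what makes this lowering sound: reducing exactly axes 1 and 2 of an NHWC tensor averages over height and width, which is global average pooling; any other axis set has no XNNPACK equivalent here. The order-insensitive check, isolated:

    #include <algorithm>
    #include <cstdint>

    // MEAN lowers to xnn global average pooling only when the reduced axes
    // are exactly {1, 2} (H and W of an NHWC tensor), in either order.
    bool ReducesExactlySpatialAxes(const int32_t axes[2]) {
        return std::min(axes[0], axes[1]) == 1 && std::max(axes[0], axes[1]) == 2;
    }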
- static ErrorStatus VisitMinimumNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitMinimumNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
@@ -1194,17 +1204,17 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_minimum2 FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitMulNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitMulNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
@@ -1224,17 +1234,17 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_multiply2 FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitNegNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitNegNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -1245,17 +1255,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_negate FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitPreluNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitPreluNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(
CheckTensorShape(operands[ins[0]].dimensions, 1, XNN_MAX_TENSOR_DIMS));
@@ -1272,17 +1283,17 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_prelu FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitPadNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands, float padding_value,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitPadNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands, float padding_value,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(
CheckTensorShape(operands[ins[0]].dimensions, 1, XNN_MAX_TENSOR_DIMS));
@@ -1293,7 +1304,7 @@ class Subgraph {
const int32_t* paddings_data = reinterpret_cast<const int32_t*>(operands[ins[1]].buffer);
for (size_t i = 0; i < operands[ins[1]].dimensions.size() * 2; i++) {
- if (paddings_data[i] < 0) return ErrorStatus::INVALID_ARGUMENT;
+ if (paddings_data[i] < 0) return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (subgraph != nullptr) {
std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings{};
@@ -1308,28 +1319,30 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_static_constant_pad FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
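
PAD's second input is an int32 tensor of shape [rank, 2], where row i carries the leading and trailing padding for dimension i; after the negativity check above, the driver unpacks those rows into the separate pre/post arrays that xnn_define_static_constant_pad takes. A standalone sketch of that unpacking, assuming row-major layout and a stand-in value for XNN_MAX_TENSOR_DIMS:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kMaxDims = 6;  // stand-in for XNN_MAX_TENSOR_DIMS

    // paddings is row-major [rank][2]: {pre, post} per dimension.
    void SplitPaddings(const int32_t* paddings, size_t rank,
                       std::array<size_t, kMaxDims>* pre,
                       std::array<size_t, kMaxDims>* post) {
        for (size_t i = 0; i < rank; i++) {
            (*pre)[i] = static_cast<size_t>(paddings[i * 2]);
            (*post)[i] = static_cast<size_t>(paddings[i * 2 + 1]);
        }
    }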
- static ErrorStatus VisitPadV2Node(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
+ static V1_3::ErrorStatus VisitPadV2Node(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
if (operands[ins[2]].type != OperandType::FLOAT32) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
float padding_value = getScalarData<float>(operands[ins[2]]);
return VisitPadNode(subgraph, operation, operands, padding_value, xnnpackTensors);
}
- static ErrorStatus VisitReshapeNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitReshapeNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(
CheckTensorShape(operands[ins[0]].dimensions, 0, XNN_MAX_TENSOR_DIMS));
@@ -1350,17 +1363,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_static_reshape FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitResizeBilinearNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitResizeBilinearNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorShape(operands[ins[0]].dimensions, 4));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -1375,7 +1389,7 @@ class Subgraph {
if (use_nchw) {
VLOG(DRIVER)
<< "XNNPACK VisitResizeBilinearNode FAILED: only NHWC layout is supported";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
@@ -1389,12 +1403,12 @@ class Subgraph {
float width_scale = getScalarData<float>(operands[ins[1]]);
float height_scale = getScalarData<float>(operands[ins[2]]);
if (width_scale <= 0 || height_scale <= 0) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
new_height = static_cast<size_t>(operands[ins[0]].dimensions[1] * height_scale);
new_width = static_cast<size_t>(operands[ins[0]].dimensions[2] * width_scale);
} else {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
bool align_corners = false;
@@ -1404,7 +1418,7 @@ class Subgraph {
half_pixel_centers = getScalarData<bool>(operands[ins[5]]);
}
if (align_corners && !half_pixel_centers) {
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (subgraph != nullptr) {
uint32_t flags = 0;
@@ -1419,17 +1433,19 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], flags);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_static_resize_bilinear_2d FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
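
RESIZE_BILINEAR's target size arrives in one of two encodings: two integer operands giving the output height and width directly, or two positive floats scaling the input's spatial dimensions, and the branch above rejects everything else. The scale branch, reduced to a worked sketch:

    #include <cstddef>
    #include <cstdint>
    #include <optional>

    struct OutputSize { size_t height, width; };

    // Scale branch: dims are the NHWC input dimensions {N, H, W, C}.
    std::optional<OutputSize> SizeFromScales(const uint32_t dims[4],
                                             float heightScale, float widthScale) {
        if (heightScale <= 0 || widthScale <= 0) return std::nullopt;
        return OutputSize{static_cast<size_t>(dims[1] * heightScale),
                          static_cast<size_t>(dims[2] * widthScale)};
    }

    // e.g. a 1x8x6x3 input with scales {2.0f, 1.5f} yields a 16x9 output.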
- static ErrorStatus VisitReluNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands, float outputMin, float outputMax,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitReluNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands, float outputMin,
+ float outputMax,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -1440,17 +1456,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_clamp FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitSqrtNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitSqrtNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -1461,17 +1478,17 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_bankers_rounding FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitSubNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitSubNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
@@ -1491,17 +1508,18 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_subtract FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
- static ErrorStatus VisitSoftmaxNode(xnn_subgraph_t subgraph, const Operation& operation,
- RunTimeOperandInfo* operands,
- const std::vector<uint32_t>& xnnpackTensors) {
- const hidl_vec<uint32_t>& ins = operation.inputs;
- const hidl_vec<uint32_t>& outs = operation.outputs;
+ static V1_3::ErrorStatus VisitSoftmaxNode(xnn_subgraph_t subgraph,
+ const V1_3::Operation& operation,
+ RunTimeOperandInfo* operands,
+ const std::vector<uint32_t>& xnnpackTensors) {
+ const hardware::hidl_vec<uint32_t>& ins = operation.inputs;
+ const hardware::hidl_vec<uint32_t>& outs = operation.outputs;
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime));
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type));
@@ -1509,14 +1527,14 @@ class Subgraph {
float beta = getScalarData<float>(operands[ins[1]]);
if (beta != 1.0f) {
LOG(ERROR) << "XNNPACK VisitSoftmaxNode FAILED, unsupported beta value: " << beta;
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (ins.size() >= 3) {
NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime));
int axis = getScalarData<int32_t>(operands[ins[2]]);
if (axis != -1) {
LOG(ERROR) << "XNNPACK VisitSoftmaxNode FAILED, unsupported axis value: " << axis;
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
}
if (subgraph != nullptr) {
@@ -1525,11 +1543,11 @@ class Subgraph {
/*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0);
if (status != xnn_status_success) {
LOG(ERROR) << "XNNPACK xnn_define_softmax FAILED";
- return ErrorStatus::GENERAL_FAILURE;
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
}
}
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
private:
@@ -1550,8 +1568,9 @@ class Subgraph {
class SamplePreparedModelXNNPACK : public SamplePreparedModel {
public:
- SamplePreparedModelXNNPACK(const Model& model, const SampleDriver* driver,
- ExecutionPreference preference, uid_t userId, Priority priority)
+ SamplePreparedModelXNNPACK(const V1_3::Model& model, const SampleDriver* driver,
+ V1_1::ExecutionPreference preference, uid_t userId,
+ V1_3::Priority priority)
: SamplePreparedModel(model, driver, preference, userId, priority),
mSubgraph(nullptr),
mThreadpool(nullptr) {}
@@ -1560,30 +1579,36 @@ class SamplePreparedModelXNNPACK : public SamplePreparedModel {
pthreadpool_destroy(mThreadpool);
};
bool initialize();
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override;
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override;
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<V1_3::IExecutionCallback>& callback) override;
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override;
- Return<void> executeSynchronously_1_3(const Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- executeSynchronously_1_3_cb cb) override;
- Return<void> configureExecutionBurst(
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override;
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override;
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) override;
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override;
+ hardware::Return<void> executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ executeSynchronously_1_3_cb cb) override;
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) override;
- Return<void> executeFenced(const Request& request, const hidl_vec<hidl_handle>& wait_for,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration,
- executeFenced_cb callback) override;
+ hardware::Return<void> executeFenced(const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& wait_for,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration,
+ executeFenced_cb callback) override;
private:
Subgraph* mSubgraph;
@@ -1591,14 +1616,14 @@ class SamplePreparedModelXNNPACK : public SamplePreparedModel {
pthreadpool* mThreadpool;
};
-Return<void> SamplePreparedModelXNNPACK::configureExecutionBurst(
+hardware::Return<void> SamplePreparedModelXNNPACK::configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) {
VLOG(DRIVER) << "SamplePreparedModelXNNPACK::configureExecutionBurst not supported";
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
}
bool SamplePreparedModelXNNPACK::initialize() {
@@ -1608,7 +1633,7 @@ bool SamplePreparedModelXNNPACK::initialize() {
VLOG(DRIVER) << "SamplePreparedModelXNNPACK::initialize failed to create pthreadpool, "
"fallback to single threaded execution";
}
- const Model* model = getModel();
+ const V1_3::Model* model = getModel();
mOperands = initializeRunTimeInfo(model->main, mPoolInfos, &model->operandValues);
mSubgraph = Subgraph::Create(model->main.operations, mOperands, model->main.inputIndexes,
model->main.outputIndexes, mThreadpool);
@@ -1616,20 +1641,20 @@ bool SamplePreparedModelXNNPACK::initialize() {
}
template <typename T_IExecutionCallback>
-void asyncExecuteXNNPACK(Subgraph* subgraph, RunTimeOperandInfo* operands, const Request& request,
- MeasureTiming measure, const Model& model,
- const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+void asyncExecuteXNNPACK(Subgraph* subgraph, RunTimeOperandInfo* operands,
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::Model& model, const std::optional<Deadline>& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const sp<T_IExecutionCallback>& callback) {
std::vector<RunTimePoolInfo> requestPoolInfos;
- if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, request.pools)) {
- notify(callback, ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, uncheckedConvert(request.pools))) {
+ notify(callback, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
}
updateForArguments(model.main.inputIndexes, request.inputs, requestPoolInfos, operands);
updateForArguments(model.main.outputIndexes, request.outputs, requestPoolInfos, operands);
auto status = subgraph->Invoke(operands);
VLOG(DRIVER) << "XNNPACK subgraph invoke returned " << toString(status);
- if (status == ErrorStatus::NONE) {
+ if (status == V1_3::ErrorStatus::NONE) {
VLOG(DRIVER) << "Completed run normally";
for (auto& runtimeInfo : requestPoolInfos) {
runtimeInfo.flush();
@@ -1639,25 +1664,26 @@ void asyncExecuteXNNPACK(Subgraph* subgraph, RunTimeOperandInfo* operands, const
}
template <typename T_IExecutionCallback>
-ErrorStatus executeXNNPACKBase(Subgraph* subgraph, RunTimeOperandInfo* operands,
- const Request& request, MeasureTiming measure, const Model& model,
- const OptionalTimePoint& halDeadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<T_IExecutionCallback>& callback) {
+V1_3::ErrorStatus executeXNNPACKBase(Subgraph* subgraph, RunTimeOperandInfo* operands,
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::Model& model,
+ const V1_3::OptionalTimePoint& halDeadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<T_IExecutionCallback>& callback) {
VLOG(DRIVER) << "executeXNNPACKBase(" << SHOW_IF_DEBUG(toString(request)) << ")";
if (callback.get() == nullptr) {
LOG(ERROR) << "invalid callback passed to executeXNNPACKBase";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (!validateRequest(request, model, /*allowUnspecifiedOutput=*/false)) {
- notify(callback, ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
- return ErrorStatus::INVALID_ARGUMENT;
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming);
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- notify(callback, ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming);
- return ErrorStatus::NONE;
+ notify(callback, V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming);
+ return V1_3::ErrorStatus::NONE;
}
// This thread is intentionally detached because the sample driver service
@@ -1668,60 +1694,63 @@ ErrorStatus executeXNNPACKBase(Subgraph* subgraph, RunTimeOperandInfo* operands,
loopTimeoutDuration, callback);
}).detach();
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
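
The control flow above is the asynchronous-execution contract in miniature: validation and the deadline check happen synchronously, but the return value only says whether the work was accepted; the actual result reaches the client later through the IExecutionCallback, from a deliberately detached thread. A stripped-down sketch of that shape, with std::function standing in for the HIDL callback:

    #include <functional>
    #include <thread>
    #include <utility>

    enum class Status { NONE, INVALID_ARGUMENT, GENERAL_FAILURE };

    // The launch status only reports whether the work was accepted; the
    // execution result is delivered later through the callback.
    Status LaunchAsync(std::function<Status()> run,
                       std::function<void(Status)> notify) {
        if (!run || !notify) return Status::INVALID_ARGUMENT;
        std::thread([run = std::move(run), notify = std::move(notify)] {
            notify(run());  // report the real outcome asynchronously
        }).detach();
        return Status::NONE;  // accepted, not yet executed
    }

Returning NONE here therefore does not mean the request succeeded, only that it was queued.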
-Return<V1_0::ErrorStatus> SamplePreparedModelXNNPACK::execute(
+hardware::Return<V1_0::ErrorStatus> SamplePreparedModelXNNPACK::execute(
const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) {
- const Model* model = getModel();
- const ErrorStatus status =
+ const V1_3::Model* model = getModel();
+ const V1_3::ErrorStatus status =
executeXNNPACKBase(mSubgraph, mOperands.data(), convertToV1_3(request),
- MeasureTiming::NO, *model, {}, {}, callback);
+ V1_2::MeasureTiming::NO, *model, {}, {}, callback);
return convertToV1_0(status);
}
-Return<V1_0::ErrorStatus> SamplePreparedModelXNNPACK::execute_1_2(
- const V1_0::Request& request, MeasureTiming measure,
+hardware::Return<V1_0::ErrorStatus> SamplePreparedModelXNNPACK::execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback) {
- const Model* model = getModel();
- const ErrorStatus status = executeXNNPACKBase(
+ const V1_3::Model* model = getModel();
+ const V1_3::ErrorStatus status = executeXNNPACKBase(
mSubgraph, mOperands.data(), convertToV1_3(request), measure, *model, {}, {}, callback);
return convertToV1_0(status);
}
-Return<V1_3::ErrorStatus> SamplePreparedModelXNNPACK::execute_1_3(
- const V1_3::Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+hardware::Return<V1_3::ErrorStatus> SamplePreparedModelXNNPACK::execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
const sp<V1_3::IExecutionCallback>& callback) {
- const Model* model = getModel();
+ const V1_3::Model* model = getModel();
return executeXNNPACKBase(mSubgraph, mOperands.data(), request, measure, *model, deadline,
loopTimeoutDuration, callback);
}
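
The three entry points stack up by version: execute and execute_1_2 lift their V1_0 request with convertToV1_3, run the shared V1_3 path, and then narrow the status back down with convertToV1_0. Narrowing is lossy, since V1_3 added statuses (for example the MISSED_DEADLINE pair) with no V1_0 counterpart. A sketch of one plausible narrowing, with simplified stand-in enums rather than the generated HAL types:

    enum class ErrorStatusV1_0 { NONE, GENERAL_FAILURE, INVALID_ARGUMENT };
    enum class ErrorStatusV1_3 {
        NONE, GENERAL_FAILURE, INVALID_ARGUMENT,
        MISSED_DEADLINE_TRANSIENT, MISSED_DEADLINE_PERSISTENT
    };

    // Assumption: statuses unknown to V1_0 collapse to GENERAL_FAILURE.
    ErrorStatusV1_0 ConvertToV1_0(ErrorStatusV1_3 s) {
        switch (s) {
            case ErrorStatusV1_3::NONE:
                return ErrorStatusV1_0::NONE;
            case ErrorStatusV1_3::INVALID_ARGUMENT:
                return ErrorStatusV1_0::INVALID_ARGUMENT;
            default:
                return ErrorStatusV1_0::GENERAL_FAILURE;
        }
    }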
-static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynchronouslyXNNPACKBase(
- Subgraph* subgraph, RunTimeOperandInfo* operands, const Request& request,
- MeasureTiming measure, const Model& model, const OptionalTimePoint& halDeadline,
- const OptionalTimeoutDuration& loopTimeoutDuration) {
+static std::tuple<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing>
+executeSynchronouslyXNNPACKBase(Subgraph* subgraph, RunTimeOperandInfo* operands,
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::Model& model,
+ const V1_3::OptionalTimePoint& halDeadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) {
VLOG(DRIVER) << "executeSynchronouslyXNNPACKBase(" << SHOW_IF_DEBUG(toString(request)) << ")";
if (!validateRequest(request, model, /*allowUnspecifiedOutput=*/false)) {
- return {ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
+ return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming};
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- return {ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming};
+ return {V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming};
}
std::vector<RunTimePoolInfo> requestPoolInfos;
- if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, request.pools)) {
- return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming};
+ if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, uncheckedConvert(request.pools))) {
+ return {V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming};
}
updateForArguments(model.main.inputIndexes, request.inputs, requestPoolInfos, operands);
updateForArguments(model.main.outputIndexes, request.outputs, requestPoolInfos, operands);
VLOG(DRIVER) << "XNNPACK subgraph invoke started";
auto status = subgraph->Invoke(operands);
VLOG(DRIVER) << "XNNPACK subgraph invoke returned " << toString(status);
- if (status == ErrorStatus::NONE) {
+ if (status == V1_3::ErrorStatus::NONE) {
VLOG(DRIVER) << "Completed run normally";
for (auto& runtimeInfo : requestPoolInfos) {
runtimeInfo.flush();
@@ -1730,59 +1759,60 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynchronous
return {status, {}, kNoTiming};
}
-Return<void> SamplePreparedModelXNNPACK::executeSynchronously(const V1_0::Request& request,
- MeasureTiming measure,
- executeSynchronously_cb cb) {
- const Model* model = getModel();
+hardware::Return<void> SamplePreparedModelXNNPACK::executeSynchronously(
+ const V1_0::Request& request, V1_2::MeasureTiming measure, executeSynchronously_cb cb) {
+ const V1_3::Model* model = getModel();
auto [status, outputShapes, timing] = executeSynchronouslyXNNPACKBase(
mSubgraph, mOperands.data(), convertToV1_3(request), measure, *model, {}, {});
cb(convertToV1_0(status), std::move(outputShapes), timing);
- return Void();
+ return hardware::Void();
}
-Return<void> SamplePreparedModelXNNPACK::executeSynchronously_1_3(
- const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
- const Model* model = getModel();
+hardware::Return<void> SamplePreparedModelXNNPACK::executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
+ const V1_3::Model* model = getModel();
auto [status, outputShapes, timing] = executeSynchronouslyXNNPACKBase(
mSubgraph, mOperands.data(), request, measure, *model, deadline, loopTimeoutDuration);
cb(status, std::move(outputShapes), timing);
- return Void();
+ return hardware::Void();
}
// The sample driver will finish the execution and then return.
-Return<void> SamplePreparedModelXNNPACK::executeFenced(
- const Request& request, const hidl_vec<hidl_handle>& waitFor, MeasureTiming measure,
- const OptionalTimePoint& halDeadline, const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration, executeFenced_cb cb) {
+hardware::Return<void> SamplePreparedModelXNNPACK::executeFenced(
+ const V1_3::Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& halDeadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb) {
VLOG(DRIVER) << "executeFenced(" << SHOW_IF_DEBUG(toString(request)) << ")";
- const Model* model = getModel();
+ const V1_3::Model* model = getModel();
if (!validateRequest(request, *model, /*allowUnspecifiedOutput=*/false)) {
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- cb(ErrorStatus::MISSED_DEADLINE_PERSISTENT, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
// Wait for the dependent events to signal
for (const auto& fenceHandle : waitFor) {
if (!fenceHandle.getNativeHandle()) {
- cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
int syncFenceFd = fenceHandle.getNativeHandle()->data[0];
if (syncWait(syncFenceFd, -1) != FenceState::SIGNALED) {
LOG(ERROR) << "syncWait failed";
- cb(ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
}
std::vector<RunTimePoolInfo> requestPoolInfos;
- if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, request.pools)) {
- cb(ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
+ if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, uncheckedConvert(request.pools))) {
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), nullptr);
}
updateForArguments(model->main.inputIndexes, request.inputs, requestPoolInfos,
mOperands.data());
@@ -1790,7 +1820,7 @@ Return<void> SamplePreparedModelXNNPACK::executeFenced(
mOperands.data());
auto status = mSubgraph->Invoke(mOperands.data());
VLOG(DRIVER) << "XNNPACK subgraph invoke returned " << toString(status);
- if (status == ErrorStatus::NONE) {
+ if (status == V1_3::ErrorStatus::NONE) {
VLOG(DRIVER) << "Completed run normally";
for (auto& runtimeInfo : requestPoolInfos) {
runtimeInfo.flush();
@@ -1799,47 +1829,49 @@ Return<void> SamplePreparedModelXNNPACK::executeFenced(
sp<SampleFencedExecutionCallback> fencedExecutionCallback =
new SampleFencedExecutionCallback(kNoTiming, kNoTiming, status);
- cb(status, hidl_handle(nullptr), fencedExecutionCallback);
- return Void();
+ cb(status, hardware::hidl_handle(nullptr), fencedExecutionCallback);
+ return hardware::Void();
}
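
executeFenced gates on its dependencies before touching any memory pool: each hidl_handle must wrap a native handle whose first fd is a sync fence, and syncWait(fd, -1) must report SIGNALED or the call fails. The gating loop over plain fds, with a stubbed syncWait that only mirrors the real helper's contract:

    #include <vector>

    enum class FenceState { SIGNALED, ERROR };

    // Stub for illustration; the driver's real syncWait blocks on the sync
    // fence fd for up to timeoutMs (-1 = forever) and reports its state.
    FenceState syncWait(int /*fd*/, int /*timeoutMs*/) { return FenceState::SIGNALED; }

    // Returns false if any dependency is invalid or fails to signal.
    bool WaitForAll(const std::vector<int>& fenceFds) {
        for (int fd : fenceFds) {
            if (fd < 0) return false;  // invalid handle
            if (syncWait(fd, -1) != FenceState::SIGNALED) return false;
        }
        return true;
    }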
class SampleDriverFloatXNNPACK : public SampleDriverPartial {
public:
SampleDriverFloatXNNPACK() : SampleDriverPartial("nnapi-sample_float_xnnpack") {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override;
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& callback) override;
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) override;
- Return<ErrorStatus> prepareModel_1_3(const Model& model, ExecutionPreference preference,
- Priority priority, const OptionalTimePoint& deadline,
- const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
- const sp<IPreparedModelCallback>& callback) override;
- Return<void> allocate(const V1_3::BufferDesc& desc,
- const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<V1_3::BufferRole>& inputRoles,
- const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) override;
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) override;
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) override;
+ hardware::Return<void> allocate(
+ const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) override;
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override;
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override;
};
template <typename T_Model, typename T_IPreparedModelCallback>
-ErrorStatus prepareModelXNNPACK(const T_Model& model, const SampleDriver* driver,
- ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline,
- const sp<T_IPreparedModelCallback>& callback) {
+V1_3::ErrorStatus prepareModelXNNPACK(const T_Model& model, const SampleDriver* driver,
+ V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const sp<T_IPreparedModelCallback>& callback) {
const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
if (callback.get() == nullptr) {
LOG(ERROR) << "invalid callback passed to prepareModelBase";
- return ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (VLOG_IS_ON(DRIVER)) {
VLOG(DRIVER) << "prepareModelBase";
@@ -1847,8 +1879,8 @@ ErrorStatus prepareModelXNNPACK(const T_Model& model, const SampleDriver* driver
}
if (!validateModel(model) || !validateExecutionPreference(preference) ||
!validatePriority(priority)) {
- notify(callback, ErrorStatus::INVALID_ARGUMENT, nullptr);
- return ErrorStatus::INVALID_ARGUMENT;
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
// asynchronously prepare the model from a new, detached thread
@@ -1856,77 +1888,81 @@ ErrorStatus prepareModelXNNPACK(const T_Model& model, const SampleDriver* driver
sp<SamplePreparedModelXNNPACK> preparedModel = new SamplePreparedModelXNNPACK(
convertToV1_3(model), driver, preference, userId, priority);
if (!preparedModel->initialize()) {
- notify(callback, ErrorStatus::INVALID_ARGUMENT, nullptr);
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
return;
}
- notify(callback, ErrorStatus::NONE, preparedModel);
+ notify(callback, V1_3::ErrorStatus::NONE, preparedModel);
}).detach();
- return ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
-Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel(
+hardware::Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel(
const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) {
- const ErrorStatus status = prepareModelXNNPACK(
- model, this, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, callback);
+ const V1_3::ErrorStatus status =
+ prepareModelXNNPACK(model, this, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER,
+ kDefaultPriority13, {}, callback);
return convertToV1_0(status);
}
-Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+hardware::Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& callback) {
- const ErrorStatus status =
- prepareModelXNNPACK(model, this, preference, kDefaultPriority, {}, callback);
+ const V1_3::ErrorStatus status =
+ prepareModelXNNPACK(model, this, preference, kDefaultPriority13, {}, callback);
return convertToV1_0(status);
}
-Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+hardware::Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) {
- const ErrorStatus status =
- prepareModelXNNPACK(model, this, preference, kDefaultPriority, {}, callback);
+ const V1_3::ErrorStatus status =
+ prepareModelXNNPACK(model, this, preference, kDefaultPriority13, {}, callback);
return convertToV1_0(status);
}
-Return<ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_3(
- const Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<IPreparedModelCallback>& callback) {
+hardware::Return<V1_3::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) {
return prepareModelXNNPACK(model, this, preference, priority, deadline, callback);
}
-Return<void> SampleDriverFloatXNNPACK::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> SampleDriverFloatXNNPACK::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "SampleDriverFloatXNNPACK::getCapabilities()";
- Capabilities capabilities = {
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.7f, .powerUsage = 1.1f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.7f, .powerUsage = 1.1f},
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}),
.ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f},
.whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}};
- update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
{.execTime = 0.8f, .powerUsage = 1.2f});
- update(&capabilities.operandPerformance, OperandType::FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32,
{.execTime = 0.8f, .powerUsage = 1.2f});
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
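
The capabilities numbers are relative costs against a CPU baseline: execTime and powerUsage below 1.0 advertise the driver as cheaper than the CPU for that operand class, and update() layers per-type overrides on top of the blanket operandPerformance table. A reduced sketch of that seed-then-override pattern; the enum value used is hypothetical:

    #include <map>

    struct PerformanceInfo { float execTime, powerUsage; };
    using OperandType = int;  // stand-in for V1_3::OperandType

    // Blanket table first, then targeted overrides, mirroring how the driver
    // seeds nonExtensionOperandPerformance() and then calls update().
    std::map<OperandType, PerformanceInfo> MakeOperandPerformance() {
        std::map<OperandType, PerformanceInfo> perf;
        for (OperandType t = 0; t < 16; t++) perf[t] = {1.0f, 1.0f};  // baseline
        constexpr OperandType kTensorFloat32 = 3;  // hypothetical enum value
        perf[kTensorFloat32] = {0.8f, 1.2f};  // faster but hungrier than CPU
        return perf;
    }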
-std::vector<bool> SampleDriverFloatXNNPACK::getSupportedOperationsImpl(const Model& model) const {
+std::vector<bool> SampleDriverFloatXNNPACK::getSupportedOperationsImpl(
+ const V1_3::Model& model) const {
std::vector<RunTimePoolInfo> poolInfos;
- setRunTimePoolInfosFromHidlMemories(&poolInfos, model.pools);
+ setRunTimePoolInfosFromCanonicalMemories(&poolInfos, uncheckedConvert(model.pools));
auto operands = initializeRunTimeInfo(model.main, poolInfos, &model.operandValues);
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
bool isSupportedOp = false;
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (Subgraph::VisitNode(/*subgraph=*/nullptr, operation, operands.data(), {}) ==
- ErrorStatus::NONE) {
+ V1_3::ErrorStatus::NONE) {
isSupportedOp = true;
}
supported[i] = isSupportedOp;
@@ -1934,14 +1970,15 @@ std::vector<bool> SampleDriverFloatXNNPACK::getSupportedOperationsImpl(const Mod
return supported;
}
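
This is the payoff of threading a nullable subgraph through every visitor: passing subgraph == nullptr turns VisitNode into a pure validity check, so operation support is computed by dry-running the exact code that would otherwise build the graph, and the two can never disagree. The two-phase trick in miniature:

    #include <cstddef>
    #include <vector>

    enum class Status { NONE, INVALID_ARGUMENT };
    struct Graph { /* would wrap xnn_subgraph_t */ };
    struct Op { bool valid; };

    // Phase 1 (graph == nullptr): validate only. Phase 2: also build.
    Status VisitNode(Graph* graph, const Op& op) {
        if (!op.valid) return Status::INVALID_ARGUMENT;
        if (graph != nullptr) {
            // ... define the node in the graph here ...
        }
        return Status::NONE;
    }

    std::vector<bool> GetSupported(const std::vector<Op>& ops) {
        std::vector<bool> supported(ops.size());
        for (size_t i = 0; i < ops.size(); i++) {
            supported[i] = VisitNode(/*graph=*/nullptr, ops[i]) == Status::NONE;
        }
        return supported;
    }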
-Return<void> SampleDriverFloatXNNPACK::allocate(
- const V1_3::BufferDesc& desc, const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<V1_3::BufferRole>& inputRoles, const hidl_vec<V1_3::BufferRole>& outputRoles,
- allocate_cb cb) {
+hardware::Return<void> SampleDriverFloatXNNPACK::allocate(
+ const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) {
VLOG(DRIVER) << "SampleDriverFloatXNNPACK::allocate not supported";
constexpr uint32_t kInvalidBufferToken = 0;
- cb(ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken);
- return Void();
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken);
+ return hardware::Void();
}
} // namespace sample_driver
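
Across every file in this patch the mechanical change is the same: with the blanket using namespace hal gone, each HAL name must state its origin, so versioned types pick up a V1_x:: qualifier and HIDL transport types pick up hardware::. In miniature, with hypothetical namespaces standing in for the generated HAL headers:

    namespace hardware { template <typename T> struct hidl_vec { /* ... */ }; }
    namespace V1_3 { enum class ErrorStatus { NONE, GENERAL_FAILURE }; }

    // Before, `using namespace hal;` let code say ErrorStatus / hidl_vec.
    // After, each use names its HAL version and transport explicitly.
    V1_3::ErrorStatus Frob(const hardware::hidl_vec<unsigned>& /*ins*/) {
        return V1_3::ErrorStatus::NONE;
    }

Making each use explicit is what lets later changes swap individual names over to the canonical types one at a time.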
diff --git a/nn/driver/sample/SampleDriverFull.cpp b/nn/driver/sample/SampleDriverFull.cpp
index e0f15eaa3..efd5e60a3 100644
--- a/nn/driver/sample/SampleDriverFull.cpp
+++ b/nn/driver/sample/SampleDriverFull.cpp
@@ -27,37 +27,35 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
-Return<void> SampleDriverFull::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> SampleDriverFull::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities_1_3()";
- Capabilities capabilities = {
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = mPerf,
.relaxedFloat32toFloat16PerformanceTensor = mPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(mPerf),
.ifPerformance = mPerf,
.whilePerformance = mPerf};
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
-Return<void> SampleDriverFull::getSupportedOperations_1_3(const V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) {
+hardware::Return<void> SampleDriverFull::getSupportedOperations_1_3(
+ const V1_3::Model& model, getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations_1_3()";
if (validateModel(model)) {
const size_t count = model.main.operations.size();
std::vector<bool> supported(count, true);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
supported[i] = !isExtensionOperationType(operation.type);
}
- cb(ErrorStatus::NONE, supported);
+ cb(V1_3::ErrorStatus::NONE, supported);
} else {
std::vector<bool> supported;
- cb(ErrorStatus::INVALID_ARGUMENT, supported);
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, supported);
}
- return Void();
+ return hardware::Void();
}
} // namespace sample_driver
diff --git a/nn/driver/sample/SampleDriverFull.h b/nn/driver/sample/SampleDriverFull.h
index 155463a85..ecf5c76b0 100644
--- a/nn/driver/sample/SampleDriverFull.h
+++ b/nn/driver/sample/SampleDriverFull.h
@@ -26,14 +26,14 @@ namespace sample_driver {
class SampleDriverFull : public SampleDriver {
public:
- SampleDriverFull(const char* name, hal::PerformanceInfo perf)
+ SampleDriverFull(const char* name, V1_0::PerformanceInfo perf)
: SampleDriver(name), mPerf(perf) {}
- hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
- hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
private:
- hal::PerformanceInfo mPerf;
+ V1_0::PerformanceInfo mPerf;
};
} // namespace sample_driver
diff --git a/nn/driver/sample/SampleDriverMinimal.cpp b/nn/driver/sample/SampleDriverMinimal.cpp
index 3b0f15d23..eef99374e 100644
--- a/nn/driver/sample/SampleDriverMinimal.cpp
+++ b/nn/driver/sample/SampleDriverMinimal.cpp
@@ -31,34 +31,32 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
class SampleDriverMinimal : public SampleDriverPartial {
public:
SampleDriverMinimal() : SampleDriverPartial("nnapi-sample_minimal") {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
private:
std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override;
};
-Return<void> SampleDriverMinimal::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> SampleDriverMinimal::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- Capabilities capabilities = {
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.4f, .powerUsage = 0.5f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.4f, .powerUsage = 0.5f},
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}),
.ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f},
.whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}};
- update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
{.execTime = 0.4f, .powerUsage = 0.5f});
- update(&capabilities.operandPerformance, OperandType::FLOAT32,
+ update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32,
{.execTime = 0.4f, .powerUsage = 0.5f});
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
std::vector<bool> SampleDriverMinimal::getSupportedOperationsImpl(const V1_3::Model& model) const {
@@ -67,13 +65,13 @@ std::vector<bool> SampleDriverMinimal::getSupportedOperationsImpl(const V1_3::Mo
// Simulate supporting just a few ops
for (size_t i = 0; i < count; i++) {
supported[i] = false;
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
switch (operation.type) {
- case OperationType::ADD:
- case OperationType::CONCATENATION:
- case OperationType::CONV_2D: {
- const Operand& firstOperand = model.main.operands[operation.inputs[0]];
- if (firstOperand.type == OperandType::TENSOR_FLOAT32) {
+ case V1_3::OperationType::ADD:
+ case V1_3::OperationType::CONCATENATION:
+ case V1_3::OperationType::CONV_2D: {
+ const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]];
+ if (firstOperand.type == V1_3::OperandType::TENSOR_FLOAT32) {
supported[i] = true;
}
break;
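Note: getCapabilities_1_3 above shows the capabilities idiom these sample drivers share: seed every non-extension operand type with a uniform default, then override the handful of types the driver is fast at. Condensed sketch (the performance numbers here are illustrative, not from this change):

    V1_3::Capabilities capabilities = {
            .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.0f, .powerUsage = 1.0f},
            .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.0f, .powerUsage = 1.0f},
            // Uniform default for every non-extension operand type.
            .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}),
            .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f},
            .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}};
    // Then override the types this driver handles well.
    update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
           {.execTime = 0.5f, .powerUsage = 0.5f});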
diff --git a/nn/driver/sample/SampleDriverPartial.cpp b/nn/driver/sample/SampleDriverPartial.cpp
index 0430a8527..0c3b4d4c2 100644
--- a/nn/driver/sample/SampleDriverPartial.cpp
+++ b/nn/driver/sample/SampleDriverPartial.cpp
@@ -32,25 +32,23 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
-Return<void> SampleDriverPartial::getSupportedOperations_1_3(const V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) {
+hardware::Return<void> SampleDriverPartial::getSupportedOperations_1_3(
+ const V1_3::Model& model, getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (validateModel(model)) {
std::vector<bool> supported = getSupportedOperationsImpl(model);
- cb(ErrorStatus::NONE, supported);
+ cb(V1_3::ErrorStatus::NONE, supported);
} else {
std::vector<bool> supported;
- cb(ErrorStatus::INVALID_ARGUMENT, supported);
+ cb(V1_3::ErrorStatus::INVALID_ARGUMENT, supported);
}
- return Void();
+ return hardware::Void();
}
-Return<ErrorStatus> SampleDriverPartial::prepareModel_1_3(
- const V1_3::Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+hardware::Return<V1_3::ErrorStatus> SampleDriverPartial::prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline, const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) {
std::vector<bool> supported = getSupportedOperationsImpl(model);
bool isModelFullySupported =
diff --git a/nn/driver/sample/SampleDriverPartial.h b/nn/driver/sample/SampleDriverPartial.h
index 9150a1eb3..953030c68 100644
--- a/nn/driver/sample/SampleDriverPartial.h
+++ b/nn/driver/sample/SampleDriverPartial.h
@@ -37,19 +37,19 @@ class SampleDriverPartial : public SampleDriver {
SampleDriverPartial(const char* name, const IOperationResolver* operationResolver =
BuiltinOperationResolver::get())
: SampleDriver(name, operationResolver) {}
- hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) override;
- hal::Return<hal::ErrorStatus> prepareModel_1_3(
- const hal::V1_3::Model& model, hal::ExecutionPreference preference,
- hal::Priority priority, const hal::OptionalTimePoint& deadline,
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
- const sp<hal::V1_3::IPreparedModelCallback>& callback) override;
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) override;
protected:
    // Given a valid NNAPI Model, returns a boolean vector that indicates which
    // ops in the model are supported by a driver.
- virtual std::vector<bool> getSupportedOperationsImpl(const hal::V1_3::Model& model) const = 0;
+ virtual std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const = 0;
};
} // namespace sample_driver
diff --git a/nn/driver/sample/SampleDriverQuant.cpp b/nn/driver/sample/SampleDriverQuant.cpp
index 91eb6e268..f73a6fd4b 100644
--- a/nn/driver/sample/SampleDriverQuant.cpp
+++ b/nn/driver/sample/SampleDriverQuant.cpp
@@ -31,47 +31,45 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
class SampleDriverQuant : public SampleDriverPartial {
public:
SampleDriverQuant() : SampleDriverPartial("nnapi-sample_quant") {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
private:
std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override;
};
-Return<void> SampleDriverQuant::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> SampleDriverQuant::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- Capabilities capabilities = {
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 50.0f, .powerUsage = 1.0f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 50.0f, .powerUsage = 1.0f},
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({50.0f, 1.0f}),
.ifPerformance = {.execTime = 50.0f, .powerUsage = 1.0f},
.whilePerformance = {.execTime = 50.0f, .powerUsage = 1.0f}};
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
-static bool isQuantized(OperandType opType) {
- return opType == OperandType::TENSOR_QUANT8_ASYMM ||
- opType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED;
+static bool isQuantized(V1_3::OperandType opType) {
+ return opType == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
+ opType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED;
}
std::vector<bool> SampleDriverQuant::getSupportedOperationsImpl(const V1_3::Model& model) const {
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) {
- const Operand& firstOperand = model.main.operands[operation.inputs[0]];
+ const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]];
supported[i] = isQuantized(firstOperand.type);
- if (operation.type == OperationType::SELECT) {
- const Operand& secondOperand = model.main.operands[operation.inputs[1]];
+ if (operation.type == V1_3::OperationType::SELECT) {
+ const V1_3::Operand& secondOperand = model.main.operands[operation.inputs[1]];
supported[i] = isQuantized(secondOperand.type);
}
}
diff --git a/nn/driver/sample/SampleDriverUtils.cpp b/nn/driver/sample/SampleDriverUtils.cpp
index 7cccf92e2..e8c571818 100644
--- a/nn/driver/sample/SampleDriverUtils.cpp
+++ b/nn/driver/sample/SampleDriverUtils.cpp
@@ -23,9 +23,7 @@ namespace android {
namespace nn {
namespace sample_driver {
-using namespace hal;
-
-void notify(const sp<V1_0::IPreparedModelCallback>& callback, const ErrorStatus& status,
+void notify(const sp<V1_0::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status,
const sp<SamplePreparedModel>& preparedModel) {
const auto ret = callback->notify(convertToV1_0(status), preparedModel);
if (!ret.isOk()) {
@@ -33,7 +31,7 @@ void notify(const sp<V1_0::IPreparedModelCallback>& callback, const ErrorStatus&
}
}
-void notify(const sp<V1_2::IPreparedModelCallback>& callback, const ErrorStatus& status,
+void notify(const sp<V1_2::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status,
const sp<SamplePreparedModel>& preparedModel) {
const auto ret = callback->notify_1_2(convertToV1_0(status), preparedModel);
if (!ret.isOk()) {
@@ -42,7 +40,7 @@ void notify(const sp<V1_2::IPreparedModelCallback>& callback, const ErrorStatus&
}
}
-void notify(const sp<V1_3::IPreparedModelCallback>& callback, const ErrorStatus& status,
+void notify(const sp<V1_3::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status,
const sp<SamplePreparedModel>& preparedModel) {
const auto ret = callback->notify_1_3(status, preparedModel);
if (!ret.isOk()) {
@@ -51,24 +49,24 @@ void notify(const sp<V1_3::IPreparedModelCallback>& callback, const ErrorStatus&
}
}
-void notify(const sp<V1_0::IExecutionCallback>& callback, const ErrorStatus& status,
- const hidl_vec<OutputShape>&, Timing) {
+void notify(const sp<V1_0::IExecutionCallback>& callback, const V1_3::ErrorStatus& status,
+ const hardware::hidl_vec<V1_2::OutputShape>&, V1_2::Timing) {
const auto ret = callback->notify(convertToV1_0(status));
if (!ret.isOk()) {
LOG(ERROR) << "Error when calling IExecutionCallback::notify: " << ret.description();
}
}
-void notify(const sp<V1_2::IExecutionCallback>& callback, const ErrorStatus& status,
- const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+void notify(const sp<V1_2::IExecutionCallback>& callback, const V1_3::ErrorStatus& status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing) {
const auto ret = callback->notify_1_2(convertToV1_0(status), outputShapes, timing);
if (!ret.isOk()) {
LOG(ERROR) << "Error when calling IExecutionCallback::notify_1_2: " << ret.description();
}
}
-void notify(const sp<V1_3::IExecutionCallback>& callback, const ErrorStatus& status,
- const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+void notify(const sp<V1_3::IExecutionCallback>& callback, const V1_3::ErrorStatus& status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing) {
const auto ret = callback->notify_1_3(status, outputShapes, timing);
if (!ret.isOk()) {
LOG(ERROR) << "Error when calling IExecutionCallback::notify_1_3" << ret.description();
diff --git a/nn/driver/sample/SampleDriverUtils.h b/nn/driver/sample/SampleDriverUtils.h
index d5a87a1e1..3a34239b2 100644
--- a/nn/driver/sample/SampleDriverUtils.h
+++ b/nn/driver/sample/SampleDriverUtils.h
@@ -25,34 +25,34 @@ namespace android {
namespace nn {
namespace sample_driver {
-void notify(const sp<hal::V1_0::IPreparedModelCallback>& callback, const hal::ErrorStatus& status,
+void notify(const sp<V1_0::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status,
const sp<SamplePreparedModel>& preparedModel);
-void notify(const sp<hal::V1_2::IPreparedModelCallback>& callback, const hal::ErrorStatus& status,
+void notify(const sp<V1_2::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status,
const sp<SamplePreparedModel>& preparedModel);
-void notify(const sp<hal::V1_3::IPreparedModelCallback>& callback, const hal::ErrorStatus& status,
+void notify(const sp<V1_3::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status,
const sp<SamplePreparedModel>& preparedModel);
-void notify(const sp<hal::V1_0::IExecutionCallback>& callback, const hal::ErrorStatus& status,
- const hal::hidl_vec<hal::OutputShape>&, hal::Timing);
+void notify(const sp<V1_0::IExecutionCallback>& callback, const V1_3::ErrorStatus& status,
+ const hardware::hidl_vec<V1_2::OutputShape>&, V1_2::Timing);
-void notify(const sp<hal::V1_2::IExecutionCallback>& callback, const hal::ErrorStatus& status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes, hal::Timing timing);
+void notify(const sp<V1_2::IExecutionCallback>& callback, const V1_3::ErrorStatus& status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing);
-void notify(const sp<hal::V1_3::IExecutionCallback>& callback, const hal::ErrorStatus& status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes, hal::Timing timing);
+void notify(const sp<V1_3::IExecutionCallback>& callback, const V1_3::ErrorStatus& status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing);
template <typename T_Model, typename T_IPreparedModelCallback>
-hal::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driver,
- hal::ExecutionPreference preference, hal::Priority priority,
- const hal::OptionalTimePoint& halDeadline,
- const sp<T_IPreparedModelCallback>& callback,
- bool isFullModelSupported = true) {
+V1_3::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driver,
+ V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& halDeadline,
+ const sp<T_IPreparedModelCallback>& callback,
+ bool isFullModelSupported = true) {
const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
if (callback.get() == nullptr) {
LOG(ERROR) << "invalid callback passed to prepareModelBase";
- return hal::ErrorStatus::INVALID_ARGUMENT;
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (VLOG_IS_ON(DRIVER)) {
VLOG(DRIVER) << "prepareModelBase";
@@ -60,17 +60,17 @@ hal::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driv
}
if (!validateModel(model) || !validateExecutionPreference(preference) ||
!validatePriority(priority)) {
- notify(callback, hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
- return hal::ErrorStatus::INVALID_ARGUMENT;
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
if (!isFullModelSupported) {
- notify(callback, hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
- return hal::ErrorStatus::NONE;
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ return V1_3::ErrorStatus::NONE;
}
const auto deadline = makeDeadline(halDeadline);
if (hasDeadlinePassed(deadline)) {
- notify(callback, hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT, nullptr);
- return hal::ErrorStatus::NONE;
+ notify(callback, V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, nullptr);
+ return V1_3::ErrorStatus::NONE;
}
// asynchronously prepare the model from a new, detached thread
@@ -78,13 +78,13 @@ hal::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driv
sp<SamplePreparedModel> preparedModel =
new SamplePreparedModel(convertToV1_3(model), driver, preference, userId, priority);
if (!preparedModel->initialize()) {
- notify(callback, hal::ErrorStatus::INVALID_ARGUMENT, nullptr);
+ notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
return;
}
- notify(callback, hal::ErrorStatus::NONE, preparedModel);
+ notify(callback, V1_3::ErrorStatus::NONE, preparedModel);
}).detach();
- return hal::ErrorStatus::NONE;
+ return V1_3::ErrorStatus::NONE;
}
} // namespace sample_driver
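Note: prepareModelBase is the shared spine for all prepareModel* entry points: reject bad arguments, honor the deadline, then prepare on a detached thread while returning NONE immediately (the real result travels through the callback). A driver override reduces to a forwarder; hypothetical sketch:

    hardware::Return<V1_3::ErrorStatus> MyDriver::prepareModel_1_3(
            const V1_3::Model& model, V1_1::ExecutionPreference preference,
            V1_3::Priority priority, const V1_3::OptionalTimePoint& deadline,
            const hardware::hidl_vec<hardware::hidl_handle>& /*modelCache*/,
            const hardware::hidl_vec<hardware::hidl_handle>& /*dataCache*/,
            const HalCacheToken& /*token*/,
            const sp<V1_3::IPreparedModelCallback>& callback) {
        return prepareModelBase(model, this, preference, priority, deadline, callback);
    }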
diff --git a/nn/runtime/Callbacks.cpp b/nn/runtime/Callbacks.cpp
index 6a81b9c14..d31098c89 100644
--- a/nn/runtime/Callbacks.cpp
+++ b/nn/runtime/Callbacks.cpp
@@ -18,28 +18,25 @@
#include "Callbacks.h"
+#include <Utils.h>
#include <android-base/logging.h>
+
#include <limits>
#include <utility>
#include <vector>
namespace android::nn {
-using namespace hal;
-
-constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
- .timeInDriver = std::numeric_limits<uint64_t>::max()};
-
// PreparedModelCallback methods begin here
-Return<void> PreparedModelCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus,
- const sp<V1_0::IPreparedModel>& preparedModel) {
+hardware::Return<void> PreparedModelCallback::notifyInternal(
+ bool deadObject, ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) {
{
std::lock_guard<std::mutex> hold(mMutex);
// quick-return if object has already been notified
if (mNotified) {
- return Void();
+ return hardware::Void();
}
// store results and mark as notified
@@ -50,22 +47,22 @@ Return<void> PreparedModelCallback::notifyInternal(bool deadObject, ErrorStatus
}
mCondition.notify_all();
- return Void();
+ return hardware::Void();
}
-Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus,
- const sp<V1_0::IPreparedModel>& preparedModel) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel);
+hardware::Return<void> PreparedModelCallback::notify(
+ V1_0::ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel);
}
-Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
- const sp<V1_2::IPreparedModel>& preparedModel) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel);
+hardware::Return<void> PreparedModelCallback::notify_1_2(
+ V1_0::ErrorStatus errorStatus, const sp<V1_2::IPreparedModel>& preparedModel) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel);
}
-Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
- const sp<V1_3::IPreparedModel>& preparedModel) {
- return notifyInternal(false, errorStatus, preparedModel);
+hardware::Return<void> PreparedModelCallback::notify_1_3(
+ V1_3::ErrorStatus errorStatus, const sp<V1_3::IPreparedModel>& preparedModel) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel);
}
void PreparedModelCallback::notifyAsDeadObject() {
@@ -94,24 +91,26 @@ bool PreparedModelCallback::isDeadObject() const {
// ExecutionCallback methods begin here
-Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), {}, kNoTiming);
+hardware::Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), {}, {});
}
-Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
- const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), outputShapes, timing);
+hardware::Return<void> ExecutionCallback::notify_1_2(
+ V1_0::ErrorStatus errorStatus, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
}
-Return<void> ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
- const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
- return notifyInternal(false, errorStatus, outputShapes, timing);
+hardware::Return<void> ExecutionCallback::notify_1_3(
+ V1_3::ErrorStatus errorStatus, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
+ return notifyInternal(false, uncheckedConvert(errorStatus), uncheckedConvert(outputShapes),
+ uncheckedConvert(timing));
}
void ExecutionCallback::notifyAsDeadObject() {
- notifyInternal(true, ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ notifyInternal(true, ErrorStatus::GENERAL_FAILURE, {}, {});
}
void ExecutionCallback::wait() const {
@@ -199,9 +198,9 @@ void ExecutionCallback::setOnFinish(const ExecutionFinish& finish) {
mOnFinish = finish;
}
-Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus,
- std::vector<OutputShape> outputShapes,
- Timing timing) {
+hardware::Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus,
+ std::vector<OutputShape> outputShapes,
+ Timing timing) {
// check results
if (!deadObject) {
if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
@@ -211,7 +210,7 @@ Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus erro
<< "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
errorStatus = ErrorStatus::GENERAL_FAILURE;
outputShapes = {};
- timing = kNoTiming;
+ timing = {};
}
} else if (errorStatus != ErrorStatus::NONE) {
// outputShapes must be empty if errorStatus is neither NONE nor
@@ -221,7 +220,7 @@ Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus erro
"neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
errorStatus = ErrorStatus::GENERAL_FAILURE;
outputShapes = {};
- timing = kNoTiming;
+ timing = {};
}
}
}
@@ -232,7 +231,7 @@ Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus erro
// quick-return if object has already been notified
if (mNotified) {
- return Void();
+ return hardware::Void();
}
mDeadObject = deadObject;
@@ -250,7 +249,7 @@ Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus erro
}
}
mCondition.notify_all();
- return Void();
+ return hardware::Void();
}
} // namespace android::nn
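Note: two idioms recur throughout this file after the migration: incoming HIDL arguments are converted exactly once at the boundary via uncheckedConvert (the unvalidated HAL-to-canonical conversion pulled in from Utils.h), and the default-constructed canonical Timing{} replaces the old UINT64_MAX kNoTiming sentinel. A member-style sketch with a hypothetical handler name:

    hardware::Return<void> onExecutionResult(
            V1_0::ErrorStatus halStatus,
            const hardware::hidl_vec<V1_2::OutputShape>& halShapes,
            const V1_2::Timing& halTiming) {
        // HAL -> canonical, once, at the edge; the rest of the runtime
        // never sees versioned types.
        return notifyInternal(/*deadObject=*/false, uncheckedConvert(halStatus),
                              uncheckedConvert(halShapes), uncheckedConvert(halTiming));
    }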
diff --git a/nn/runtime/Callbacks.h b/nn/runtime/Callbacks.h
index 75370254e..66408ce70 100644
--- a/nn/runtime/Callbacks.h
+++ b/nn/runtime/Callbacks.h
@@ -17,9 +17,11 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H
-#include "HalInterfaces.h"
-
+#include <HalInterfaces.h>
+#include <Utils.h>
#include <android-base/thread_annotations.h>
+#include <nnapi/Types.h>
+
#include <condition_variable>
#include <functional>
#include <mutex>
@@ -60,7 +62,7 @@ namespace android::nn {
*
* This callback object is passed as an argument to IDevice::prepareModel*.
*/
-class PreparedModelCallback : public hal::IPreparedModelCallback {
+class PreparedModelCallback : public V1_3::IPreparedModelCallback {
public:
/**
* IPreparedModelCallback::notify marks the callback object with the return
@@ -85,8 +87,8 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- hal::Return<void> notify(hal::V1_0::ErrorStatus status,
- const sp<hal::V1_0::IPreparedModel>& preparedModel) override;
+ hardware::Return<void> notify(V1_0::ErrorStatus status,
+ const sp<V1_0::IPreparedModel>& preparedModel) override;
/**
* IPreparedModelCallback::notify_1_2 marks the callback object with the
@@ -111,8 +113,8 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status,
- const sp<hal::V1_2::IPreparedModel>& preparedModel) override;
+ hardware::Return<void> notify_1_2(V1_0::ErrorStatus status,
+ const sp<V1_2::IPreparedModel>& preparedModel) override;
/**
* IPreparedModelCallback::notify_1_3 marks the callback object with the
@@ -139,8 +141,8 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status,
- const sp<hal::V1_3::IPreparedModel>& preparedModel) override;
+ hardware::Return<void> notify_1_3(V1_3::ErrorStatus status,
+ const sp<V1_3::IPreparedModel>& preparedModel) override;
/**
* Mark the callback object as a dead object. This acts as a call to notify.
@@ -169,7 +171,7 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
* - DEAD_OBJECT if the driver crashed without returning a result
*/
- hal::V1_3::ErrorStatus getStatus() const;
+ ErrorStatus getStatus() const;
/**
* Retrieves the model that has been prepared for execution from the
@@ -181,7 +183,7 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
* @return preparedModel Returned model that has been prepared for
* execution, nullptr if the model was unable to be prepared.
*/
- sp<hal::V1_0::IPreparedModel> getPreparedModel() const;
+ sp<V1_0::IPreparedModel> getPreparedModel() const;
/**
* Queries whether the object is dead.
@@ -191,15 +193,15 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
bool isDeadObject() const;
private:
- hal::Return<void> notifyInternal(bool deadObject, hal::ErrorStatus errorStatus,
- const sp<hal::V1_0::IPreparedModel>& preparedModel);
+ hardware::Return<void> notifyInternal(bool deadObject, ErrorStatus errorStatus,
+ const sp<V1_0::IPreparedModel>& preparedModel);
mutable std::mutex mMutex;
mutable std::condition_variable mCondition;
bool mNotified GUARDED_BY(mMutex) = false;
bool mDeadObject = false;
- hal::ErrorStatus mErrorStatus = hal::ErrorStatus::GENERAL_FAILURE;
- sp<hal::V1_0::IPreparedModel> mPreparedModel;
+ ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ sp<V1_0::IPreparedModel> mPreparedModel;
};
/**
@@ -216,9 +218,9 @@ class PreparedModelCallback : public hal::IPreparedModelCallback {
*
* This callback object is passed as an argument to IPreparedModel::execute*.
*/
-class ExecutionCallback : public hal::IExecutionCallback {
+class ExecutionCallback : public V1_3::IExecutionCallback {
using ExecutionFinish =
- std::function<hal::ErrorStatus(hal::ErrorStatus, const std::vector<hal::OutputShape>&)>;
+ std::function<ErrorStatus(ErrorStatus, const std::vector<OutputShape>&)>;
public:
/**
@@ -244,7 +246,7 @@ class ExecutionCallback : public hal::IExecutionCallback {
* enough to store the resultant values
* - INVALID_ARGUMENT if the input request is invalid
*/
- hal::Return<void> notify(hal::V1_0::ErrorStatus status) override;
+ hardware::Return<void> notify(V1_0::ErrorStatus status) override;
/**
* IExecutionCallback::notify_1_2 marks the callback object with the results
@@ -279,9 +281,9 @@ class ExecutionCallback : public hal::IExecutionCallback {
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
- hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes,
- const hal::Timing& timing) override;
+ hardware::Return<void> notify_1_2(V1_0::ErrorStatus status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) override;
/**
* IExecutionCallback::notify_1_3 marks the callback object with the results
@@ -318,15 +320,15 @@ class ExecutionCallback : public hal::IExecutionCallback {
* reported as UINT64_MAX. A driver may choose to report any time as
* UINT64_MAX, indicating that particular measurement is not available.
*/
- hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes,
- const hal::Timing& timing) override;
+ hardware::Return<void> notify_1_3(V1_3::ErrorStatus status,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) override;
// An overload of the latest notify interface to hide the version from ExecutionBuilder.
- hal::Return<void> notify(hal::V1_3::ErrorStatus status,
- const hal::hidl_vec<hal::OutputShape>& outputShapes,
- const hal::Timing& timing) {
- return notify_1_3(status, outputShapes, timing);
+ hardware::Return<void> notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes,
+ const Timing& timing) {
+ return notify_1_3(convertToV1_3(status), convertToV1_2(outputShapes),
+ convertToV1_2(timing));
}
/**
@@ -362,7 +364,7 @@ class ExecutionCallback : public hal::IExecutionCallback {
* - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
* - DEAD_OBJECT if the driver crashed without returning a result
*/
- hal::V1_3::ErrorStatus getStatus() const;
+ ErrorStatus getStatus() const;
/**
* Retrieves the output shapes returned from the asynchronous task launched
@@ -385,7 +387,7 @@ class ExecutionCallback : public hal::IExecutionCallback {
* OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
* at least one output operand that is not fully-specified.
*/
- const std::vector<hal::OutputShape>& getOutputShapes() const;
+ const std::vector<OutputShape>& getOutputShapes() const;
/**
* Retrieves the duration of execution of the asynchronous task launched by
@@ -400,7 +402,7 @@ class ExecutionCallback : public hal::IExecutionCallback {
* @return timing Duration of the execution. Every time must be UINT64_MAX
* unless the status is NONE.
*/
- hal::Timing getTiming() const;
+ Timing getTiming() const;
/**
* ExecutionCallback::bindThread binds a thread to the ExecutionCallback
@@ -461,9 +463,8 @@ class ExecutionCallback : public hal::IExecutionCallback {
* before any call to wait or get* return. It then enables all prior and
* future wait calls on the ExecutionCallback object to proceed.
*/
- hal::Return<void> notifyInternal(bool deadObject, hal::ErrorStatus errorStatus,
- std::vector<hal::OutputShape> outputShapes,
- hal::Timing timing);
+ hardware::Return<void> notifyInternal(bool deadObject, ErrorStatus errorStatus,
+ std::vector<OutputShape> outputShapes, Timing timing);
// members
mutable std::mutex mMutex;
@@ -472,9 +473,9 @@ class ExecutionCallback : public hal::IExecutionCallback {
ExecutionFinish mOnFinish GUARDED_BY(mMutex);
bool mNotified GUARDED_BY(mMutex) = false;
bool mDeadObject = false;
- hal::ErrorStatus mErrorStatus = hal::ErrorStatus::GENERAL_FAILURE;
- std::vector<hal::OutputShape> mOutputShapes;
- hal::Timing mTiming = {};
+ ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ std::vector<OutputShape> mOutputShapes;
+ Timing mTiming = {};
};
} // namespace android::nn
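Note: conversions also run the other way: the unversioned ExecutionCallback::notify overload above takes canonical types from ExecutionBuilder and re-versions them (convertToV1_3 / convertToV1_2) before crossing back into HIDL. Caller-side sketch, assuming callback is a sp<ExecutionCallback>:

    ErrorStatus status = ErrorStatus::NONE;
    std::vector<OutputShape> shapes;  // canonical shapes computed by the runtime
    Timing timing = {};               // default-constructed == "no timing"
    callback->notify(status, shapes, timing);  // versioning happens inside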
diff --git a/nn/runtime/CompilationBuilder.cpp b/nn/runtime/CompilationBuilder.cpp
index 051ac886c..5d2d5db07 100644
--- a/nn/runtime/CompilationBuilder.cpp
+++ b/nn/runtime/CompilationBuilder.cpp
@@ -36,8 +36,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
CompilationBuilder::CompilationBuilder(const ModelBuilder* model,
const std::vector<std::shared_ptr<Device>>& devices,
bool explicitDeviceList)
diff --git a/nn/runtime/Event.h b/nn/runtime/Event.h
index 982381a09..41b9a28e8 100644
--- a/nn/runtime/Event.h
+++ b/nn/runtime/Event.h
@@ -28,7 +28,7 @@ class IEvent {
public:
virtual ~IEvent() = default;
virtual void wait() const = 0;
- virtual hal::ErrorStatus getStatus() const = 0;
+ virtual ErrorStatus getStatus() const = 0;
virtual int getSyncFenceFd(bool shouldDup) const = 0;
};
@@ -40,7 +40,7 @@ class CallbackEvent : public IEvent {
}
void wait() const override { kExecutionCallback->wait(); }
- hal::ErrorStatus getStatus() const override { return kExecutionCallback->getStatus(); }
+ ErrorStatus getStatus() const override { return kExecutionCallback->getStatus(); }
// Always return -1 as this is not backed by a sync fence.
int getSyncFenceFd(bool /*should_dup*/) const override { return -1; }
@@ -51,7 +51,7 @@ class CallbackEvent : public IEvent {
// The SyncFenceEvent wraps a sync fence and an IFencedExecutionCallback
class SyncFenceEvent : public IEvent {
public:
- SyncFenceEvent(int sync_fence_fd, const sp<hal::IFencedExecutionCallback>& callback)
+ SyncFenceEvent(int sync_fence_fd, const sp<V1_3::IFencedExecutionCallback>& callback)
: kFencedExecutionCallback(callback) {
if (sync_fence_fd > 0) {
// Dup the provided file descriptor
@@ -69,18 +69,18 @@ class SyncFenceEvent : public IEvent {
// Get the status of the event.
// In case of syncWait error, query the dispatch callback for detailed
// error status.
- hal::ErrorStatus getStatus() const override {
- auto error = hal::ErrorStatus::NONE;
+ ErrorStatus getStatus() const override {
+ auto error = ErrorStatus::NONE;
if (mSyncFenceFd > 0 && syncWait(mSyncFenceFd, -1) != FenceState::SIGNALED) {
- error = hal::ErrorStatus::GENERAL_FAILURE;
+ error = ErrorStatus::GENERAL_FAILURE;
// If there is a callback available, use the callback to get the error code.
if (kFencedExecutionCallback != nullptr) {
- const hal::Return<void> ret = kFencedExecutionCallback->getExecutionInfo(
- [&error](hal::ErrorStatus status, hal::Timing, hal::Timing) {
- error = status;
+ const hardware::Return<void> ret = kFencedExecutionCallback->getExecutionInfo(
+ [&error](V1_3::ErrorStatus status, V1_2::Timing, V1_2::Timing) {
+ error = uncheckedConvert(status);
});
if (!ret.isOk()) {
- error = hal::ErrorStatus::GENERAL_FAILURE;
+ error = ErrorStatus::GENERAL_FAILURE;
}
}
}
@@ -102,7 +102,7 @@ class SyncFenceEvent : public IEvent {
private:
    // TODO(b/148423931): use android::base::unique_fd instead.
int mSyncFenceFd = -1;
- const sp<hal::IFencedExecutionCallback> kFencedExecutionCallback;
+ const sp<V1_3::IFencedExecutionCallback> kFencedExecutionCallback;
};
} // namespace android::nn
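Note: after this change IEvent::getStatus reports the canonical ErrorStatus whether the event is callback-backed or fence-backed; SyncFenceEvent converts the driver's V1_3 status inside the getExecutionInfo lambda. Caller-side sketch (syncFd and fencedCallback are hypothetical):

    std::unique_ptr<IEvent> event =
            std::make_unique<SyncFenceEvent>(syncFd, fencedCallback);
    event->wait();  // blocks on the fence (or the wrapped callback)
    if (event->getStatus() != ErrorStatus::NONE) {
        LOG(ERROR) << "fenced execution failed";
    }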
diff --git a/nn/runtime/ExecutionBuilder.cpp b/nn/runtime/ExecutionBuilder.cpp
index 8b6b81758..aaf2bbdb9 100644
--- a/nn/runtime/ExecutionBuilder.cpp
+++ b/nn/runtime/ExecutionBuilder.cpp
@@ -45,12 +45,10 @@
namespace android {
namespace nn {
-using namespace hal;
-
// Partial validation of output shapes returned from the driver, to ensure they
// conform to a very specific set of rules.
static bool validateOutputShapesFromDriver(ErrorStatus executionStatus, const ModelBuilder* model,
- const std::vector<hal::OutputShape>& shapes) {
+ const std::vector<OutputShape>& shapes) {
// Enforces the following rules (some of which are from b/154054474):
// - shapes vector is empty except in the case of NONE or OUTPUT_INSUFFICIENT_SIZE.
// If the vector is not empty, it must have as many entries as the step model has outputs.
@@ -61,21 +59,21 @@ static bool validateOutputShapesFromDriver(ErrorStatus executionStatus, const Mo
switch (executionStatus) {
case ErrorStatus::NONE: {
NN_RET_CHECK(shapes.size() == 0 || shapes.size() == model->outputCount())
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " output shapes vector must be empty or of length " << model->outputCount()
<< " but has length " << shapes.size();
NN_RET_CHECK(std::all_of(shapes.begin(), shapes.end(),
[](const OutputShape& shape) { return shape.isSufficient; }))
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " at least one output shape is unexpectedly marked !isSufficient";
const TypeManager* tm = TypeManager::get();
for (uint32_t outputIndex = 0, outputCount = shapes.size(); outputIndex < outputCount;
++outputIndex) {
- const hal::Operand& outputOperand = model->getOutputOperand(outputIndex);
+ const Operand& outputOperand = model->getOutputOperand(outputIndex);
NN_RET_CHECK(!tm->isTensorType(outputOperand.type) ||
(shapes[outputIndex].dimensions.size() != 0))
- << "With execution ErrorStatus " << toString(executionStatus) << " output#"
+ << "With execution ErrorStatus " << executionStatus << " output#"
<< outputIndex << " shape unexpectedly has zero rank";
}
@@ -83,18 +81,18 @@ static bool validateOutputShapesFromDriver(ErrorStatus executionStatus, const Mo
}
case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: {
NN_RET_CHECK(shapes.size() == model->outputCount())
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " output shapes vector must be of length " << model->outputCount()
<< " but has length " << shapes.size();
NN_RET_CHECK(std::any_of(shapes.begin(), shapes.end(),
[](const OutputShape& shape) { return !shape.isSufficient; }))
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " at least one output shape must have been marked !isSufficient";
break;
}
default: {
NN_RET_CHECK(shapes.size() == 0)
- << "With execution ErrorStatus " << toString(executionStatus)
+ << "With execution ErrorStatus " << executionStatus
<< " output shapes vector must be empty but has length " << shapes.size();
break;
}
@@ -102,13 +100,11 @@ static bool validateOutputShapesFromDriver(ErrorStatus executionStatus, const Mo
return true;
}
static bool validateOutputShapesFromDriver(int executionResultCode, const ModelBuilder* model,
- const std::vector<hal::OutputShape>& shapes) {
+ const std::vector<OutputShape>& shapes) {
return validateOutputShapesFromDriver(convertResultCodeToErrorStatus(executionResultCode),
model, shapes);
}
-const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-
static MeasureTiming measureTiming(const ExecutionBuilder* execution) {
return execution->measureTiming() ? MeasureTiming::YES : MeasureTiming::NO;
}
@@ -117,7 +113,7 @@ static bool checkDimensionInfo(const Operand& operand, const ANeuralNetworksOper
const char* tag, bool allowUnspecified) {
if (newType != nullptr) {
const Extension::OperandTypeInformation* info = nullptr;
- if (isExtensionOperandType(operand.type)) {
+ if (isExtension(operand.type)) {
NN_RET_CHECK(TypeManager::get()->getExtensionOperandTypeInfo(operand.type, &info));
}
if (validateOperandType(*newType, info, tag, allowUnspecified) !=
@@ -220,7 +216,8 @@ int ExecutionBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType*
}
int ExecutionBuilder::setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length) {
+ const RuntimeMemory* memory, size_t offset,
+ size_t length) {
// Should be similar to StepExecutor::setInputOrOutputFromMemory()
if (mStarted) {
@@ -297,7 +294,8 @@ int ExecutionBuilder::setOutput(uint32_t index, const ANeuralNetworksOperandType
}
int ExecutionBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length) {
+ const RuntimeMemory* memory, size_t offset,
+ size_t length) {
// Should be similar to StepExecutor::setInputOrOutputFromMemory()
if (mStarted) {
@@ -383,12 +381,12 @@ int ExecutionBuilder::getDuration(int32_t durationCode, uint64_t* duration) cons
Timing timingFenced = timingLaunched;
if (mFencedExecutionCallback != nullptr) {
ErrorStatus status;
- const Return<void> ret = mFencedExecutionCallback->getExecutionInfo(
- [&status, &timingLaunched, &timingFenced](ErrorStatus error, Timing tLaunched,
- Timing tFenced) {
- status = error;
- timingLaunched = tLaunched;
- timingFenced = tFenced;
+ const hardware::Return<void> ret = mFencedExecutionCallback->getExecutionInfo(
+ [&status, &timingLaunched, &timingFenced](
+ V1_3::ErrorStatus error, V1_2::Timing tLaunched, V1_2::Timing tFenced) {
+ status = uncheckedConvert(error);
+ timingLaunched = uncheckedConvert(tLaunched);
+ timingFenced = uncheckedConvert(tFenced);
});
if (!ret.isOk()) {
*duration = UINT64_MAX;
@@ -546,7 +544,7 @@ cpuFallbackPartial(const ExecutionPlan& plan,
std::shared_ptr<StepExecutor> executor;
int n1 = plan.fallback(controller, &executor, nullptr, nullptr);
if (n1 != ANEURALNETWORKS_NO_ERROR) {
- return {n1, {}, kNoTiming, nullptr};
+ return {n1, {}, {}, nullptr};
}
CHECK(executor != nullptr);
@@ -565,7 +563,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
VLOG(EXECUTION) << "ExecutionBuilder::compute (from plan, iteratively)";
std::vector<OutputShape> outputShapes = executionBuilder->getInitialOutputShapes();
- Timing timing = kNoTiming;
+ Timing timing;
// Disallow CPU fallback when the ExecutionPlan is simple on CPU.
allowCpuFallback &= !plan.isSimpleCpu();
@@ -589,7 +587,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
bool missedDeadline = n == ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT ||
n == ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
if (allowCpuFallback && !missedDeadline) break;
- executionCallback->notify(convertResultCodeToErrorStatus(n), {}, kNoTiming);
+ executionCallback->notify(convertResultCodeToErrorStatus(n), {}, {});
return;
}
@@ -636,7 +634,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
// - we didn't learn anything new about dynamic temporaries.
// Neither of these is recoverable, so end execution.
const ErrorStatus stepStatus = convertResultCodeToErrorStatus(stepN);
- executionCallback->notify(stepStatus, outputShapes, kNoTiming);
+ executionCallback->notify(stepStatus, outputShapes, {});
return;
}
// Every main model output is of sufficient size. This implies that
@@ -649,7 +647,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
// If CPU fallback is not allowed and there was an error, end execution.
if (!allowCpuFallback) {
const ErrorStatus stepStatus = convertResultCodeToErrorStatus(stepN);
- executionCallback->notify(stepStatus, {}, kNoTiming);
+ executionCallback->notify(stepStatus, {}, {});
return;
}
@@ -658,7 +656,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
// (2) return from the function with an error
if (executorIsCpu) {
if (!plan.isSimple()) break;
- executionCallback->notify(convertResultCodeToErrorStatus(stepN), {}, kNoTiming);
+ executionCallback->notify(convertResultCodeToErrorStatus(stepN), {}, {});
return;
}
@@ -706,7 +704,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
// - we didn't learn anything new about dynamic temporaries.
// Neither of these is recoverable, so end execution.
const ErrorStatus fallbackStatus = convertResultCodeToErrorStatus(fallbackN);
- executionCallback->notify(fallbackStatus, outputShapes, kNoTiming);
+ executionCallback->notify(fallbackStatus, outputShapes, {});
return;
}
// Every main model output is of sufficient size. This implies
@@ -718,7 +716,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
// Do not fallback twice if the ExecutionPlan is simple.
if (plan.isSimple()) {
const ErrorStatus fallbackStatus = convertResultCodeToErrorStatus(fallbackN);
- executionCallback->notify(fallbackStatus, {}, kNoTiming);
+ executionCallback->notify(fallbackStatus, {}, {});
return;
}
@@ -748,7 +746,7 @@ static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder,
// fence and the fenced compute callback returned from the last partition.
// Any failed partition will result in the whole execution fallback to CPU if
// allowCpuFallback is set to true.
-static std::tuple<int, int, sp<hal::IFencedExecutionCallback>> startComputeFenced(
+static std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>> startComputeFenced(
ExecutionBuilder* executionBuilder, const ExecutionPlan& plan,
std::shared_ptr<ExecutionPlan::Controller> controller, const std::vector<int>& waitFor,
uint64_t timeoutDurationAfterFence, const std::optional<Deadline>& deadline,
@@ -773,7 +771,7 @@ static std::tuple<int, int, sp<hal::IFencedExecutionCallback>> startComputeFence
// Initiate waitForFds, syncFence for the first step.
std::vector<int> waitForFds = waitFor;
int syncFence = -1;
- sp<hal::IFencedExecutionCallback> computeFencedCallback;
+ sp<V1_3::IFencedExecutionCallback> computeFencedCallback;
while (true) {
VLOG(EXECUTION) << "looking for next StepExecutor";
@@ -942,7 +940,7 @@ int ExecutionBuilder::compute(sp<ExecutionCallback>* synchronizationCallback,
LOG(ERROR) << "ANeuralNetworksExecution_" << name() << " not all inputs specified";
return ANEURALNETWORKS_BAD_DATA;
} else if (p.state() == ModelArgumentInfo::MEMORY) {
- const Memory* memory = mMemories[p.locationAndLength().poolIndex];
+ const RuntimeMemory* memory = mMemories[p.locationAndLength().poolIndex];
if (!memory->getValidator().validateInputDimensions(p.dimensions())) {
return ANEURALNETWORKS_OP_FAILED;
}
@@ -1015,7 +1013,7 @@ std::vector<OutputShape> ExecutionBuilder::getInitialOutputShapes() const {
std::vector<OutputShape> outputShapes(mOutputs.size());
std::transform(mOutputs.begin(), mOutputs.end(), outputShapes.begin(),
[](const auto& x) -> OutputShape {
- hidl_vec<uint32_t> dimensions;
+ std::vector<uint32_t> dimensions;
if (x.state() != ModelArgumentInfo::HAS_NO_VALUE) {
dimensions = x.dimensions();
}
@@ -1067,7 +1065,7 @@ bool ExecutionBuilder::updateOutputShapes(ErrorStatus status,
bool ExecutionBuilder::updateMemories() {
for (const auto& output : mOutputs) {
if (output.state() != ModelArgumentInfo::MEMORY) continue;
- const Memory* memory = mMemories[output.locationAndLength().poolIndex];
+ const RuntimeMemory* memory = mMemories[output.locationAndLength().poolIndex];
NN_RET_CHECK(memory->getValidator().updateMetadata({.dimensions = output.dimensions()}));
}
return true;
@@ -1084,7 +1082,7 @@ ErrorStatus ExecutionBuilder::finishWithoutSyncFence(ErrorStatus status,
bool success = status == ErrorStatus::NONE;
for (const auto& output : mOutputs) {
if (output.state() != ModelArgumentInfo::MEMORY) continue;
- const Memory* memory = mMemories[output.locationAndLength().poolIndex];
+ const RuntimeMemory* memory = mMemories[output.locationAndLength().poolIndex];
memory->getValidator().setInitialized(success);
}
switch (convertErrorStatusToResultCode(status)) {
@@ -1124,7 +1122,7 @@ bool StepExecutor::updateOutputShapes(int executionResultCode, const std::vector
if (VLOG_IS_ON(EXECUTION)) {
for (const auto& shape : from) {
- VLOG(EXECUTION) << "updateOutputShapes: " << toString(shape);
+ VLOG(EXECUTION) << "updateOutputShapes: " << shape;
}
}
@@ -1233,8 +1231,8 @@ bool StepExecutor::updateOutputShapes(int executionResultCode, const std::vector
StepExecutor::StepExecutor(ExecutionBuilder* executionBuilder, const ModelBuilder* model,
std::shared_ptr<Device> device,
- std::shared_ptr<PreparedModel> preparedModel, const ExecutionStep* step,
- DynamicTemporaries* dynamicTemporaries)
+ std::shared_ptr<RuntimePreparedModel> preparedModel,
+ const ExecutionStep* step, DynamicTemporaries* dynamicTemporaries)
: mExecutionBuilder(executionBuilder),
mExecutionStep(step),
mDynamicTemporaries(dynamicTemporaries),
@@ -1261,7 +1259,7 @@ void StepExecutor::mapInputsAndOutputsTrivially() {
void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
ModelArgumentInfo* executorInputOrOutput,
- const hidl_vec<uint32_t>* builderDimensions) {
+ const Dimensions* builderDimensions) {
auto updateDimensions = [executorInputOrOutput, builderDimensions] {
if (!builderDimensions) {
return;
@@ -1283,7 +1281,7 @@ void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutpu
case ModelArgumentInfo::MEMORY: {
updateDimensions();
const uint32_t builderPoolIndex = builderInputOrOutput.locationAndLength().poolIndex;
- const Memory* memory = mExecutionBuilder->mMemories[builderPoolIndex];
+ const RuntimeMemory* memory = mExecutionBuilder->mMemories[builderPoolIndex];
const uint32_t executorPoolIndex = mMemories.add(memory);
executorInputOrOutput->locationAndLength().poolIndex = executorPoolIndex;
break;
@@ -1292,8 +1290,8 @@ void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutpu
}
int StepExecutor::setInputOrOutputFromMemory(const Operand& inputOrOutputOperand,
- const Memory* memory, uint32_t offset,
- const hal::hidl_vec<uint32_t>& dimensions,
+ const RuntimeMemory* memory, uint32_t offset,
+ const Dimensions& dimensions,
std::optional<uint32_t> length,
ModelArgumentInfo* inputOrOutputInfo) {
// Should be similar to
@@ -1361,12 +1359,6 @@ bool StepExecutor::isCpu() const {
return mDevice == DeviceManager::getCpuDevice();
}
-static OptionalTimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) {
- OptionalTimeoutDuration otd;
- otd.nanoseconds(nanoseconds);
- return otd;
-}
-
std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::compute(
const std::optional<Deadline>& deadline,
const std::shared_ptr<ExecutionBurstController>& burstController) {
@@ -1374,7 +1366,7 @@ std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::compute(
}
std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeWithMemories(
- const std::optional<Deadline>& deadline, const std::vector<const Memory*>& memories,
+ const std::optional<Deadline>& deadline, const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController) {
CHECK(mPreparedModel != nullptr);
@@ -1393,7 +1385,7 @@ std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeWithMemor
return {n, std::move(outputShapes), timing};
}
-std::tuple<int, int, sp<hal::IFencedExecutionCallback>> StepExecutor::computeFenced(
+std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>> StepExecutor::computeFenced(
const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence,
const std::optional<Deadline>& deadline) {
CHECK(mPreparedModel != nullptr);
@@ -1408,7 +1400,7 @@ std::tuple<int, int, sp<hal::IFencedExecutionCallback>> StepExecutor::computeFen
makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration());
OptionalTimeoutDuration optionalTimeoutDurationAfterFence;
if (timeoutDurationAfterFence > 0) {
- optionalTimeoutDurationAfterFence.nanoseconds(timeoutDurationAfterFence);
+ optionalTimeoutDurationAfterFence = makeTimeoutDuration(timeoutDurationAfterFence);
}
const auto [n, syncFence, computeFencedCallback, timing] = mPreparedModel->executeFenced(
mInputs, mOutputs, mMemories.getObjects(), waitFor, measure, deadline,
@@ -1425,24 +1417,24 @@ std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeOnCpuFall
VLOG(EXECUTION) << "Re-compile the model on CPU";
mDevice = DeviceManager::getCpuDevice();
mPreparedModel = nullptr;
- const ModelFactory makeModel = [this] { return mModel->makeHidlModel(); };
+ const ModelFactory makeModel = [this] { return mModel->makeModel(); };
// TODO: Propagate user preference and compilation priority to this point instead of using
// default values of ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER and
// ANEURALNETWORKS_PRIORITY_MEDIUM
const ExecutionPreference preference =
static_cast<ExecutionPreference>(ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER);
- const Priority priority = convertToHalPriority(ANEURALNETWORKS_PRIORITY_DEFAULT);
+ const Priority priority = convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_DEFAULT);
auto [n, preparedModel] = mDevice->prepareModel(makeModel, preference, priority, {}, {}, {});
mPreparedModel = std::move(preparedModel);
if (n != ANEURALNETWORKS_NO_ERROR) {
- return {n, {}, kNoTiming};
+ return {n, {}, {}};
}
// Prepare device memories for CPU fallback.
- std::vector<const Memory*> memories = mMemories.getObjects();
+ std::vector<const RuntimeMemory*> memories = mMemories.getObjects();
std::vector<bool> isUsedAsInput(memories.size(), false);
std::vector<bool> isUsedAsOutput(memories.size(), false);
- std::vector<std::unique_ptr<Memory>> blobAhwbs;
+ std::vector<std::unique_ptr<RuntimeMemory>> blobAhwbs;
// Mark the input and output usages.
for (auto& input : mInputs) {
@@ -1458,7 +1450,7 @@ std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeOnCpuFall
if (mMemories[poolIndex]->getValidator().createdWithUnknownShape()) {
LOG(ERROR) << "Cannot fallback to CPU because at least one of the output operands "
"has unknown shape.";
- return {ANEURALNETWORKS_OP_FAILED, {}, kNoTiming};
+ return {ANEURALNETWORKS_OP_FAILED, {}, {}};
}
isUsedAsOutput[poolIndex] = true;
}
@@ -1466,17 +1458,17 @@ std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeOnCpuFall
// Allocate BLOB mode AHardwareBuffers and read the data from input device memories.
for (uint32_t i = 0; i < memories.size(); i++) {
- const Memory* memory = mMemories[i];
+ const RuntimeMemory* memory = mMemories[i];
if (memory->getIBuffer() != nullptr) {
const uint32_t size = memory->getValidator().getMetadata().logicalSize;
auto [nAhwb, blobAhwb] = MemoryRuntimeAHWB::create(size);
if (nAhwb != ANEURALNETWORKS_NO_ERROR) {
- return {nAhwb, {}, kNoTiming};
+ return {nAhwb, {}, {}};
}
if (isUsedAsInput[i]) {
n = copyIBufferToHidlMemory(memory->getIBuffer(), blobAhwb->getHidlMemory());
if (n != ANEURALNETWORKS_NO_ERROR) {
- return {n, {}, kNoTiming};
+ return {n, {}, {}};
}
}
memories[i] = blobAhwb.get();
@@ -1491,11 +1483,11 @@ std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeOnCpuFall
// Write back to output device memories.
for (uint32_t i = 0; i < memories.size(); i++) {
- const Memory* memory = mMemories[i];
+ const RuntimeMemory* memory = mMemories[i];
if (memory->getIBuffer() != nullptr && isUsedAsOutput[i]) {
n = copyHidlMemoryToIBuffer(memories[i]->getHidlMemory(), memory->getIBuffer(), {});
if (n != ANEURALNETWORKS_NO_ERROR) {
- return {n, {}, kNoTiming};
+ return {n, {}, {}};
}
}
}
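Note: two quiet simplifications in this file: the file-local makeTimeoutDuration helper, which mutated the HIDL safe_union in place (otd.nanoseconds(ns)), is deleted in favor of a shared value-returning helper, and every kNoTiming use becomes a default-constructed {}. Sketch of the new call shape (timeoutNs is hypothetical):

    // OptionalTimeoutDuration now behaves like a plain optional value.
    const OptionalTimeoutDuration otd = makeTimeoutDuration(timeoutNs);
    Timing timing = {};  // value-initialized == "no timing"; replaces kNoTiming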
diff --git a/nn/runtime/ExecutionBuilder.h b/nn/runtime/ExecutionBuilder.h
index 2540f233c..1dbfce6bb 100644
--- a/nn/runtime/ExecutionBuilder.h
+++ b/nn/runtime/ExecutionBuilder.h
@@ -43,9 +43,9 @@ class DynamicTemporaries;
class ExecutionBurstController;
class ExecutionPlan;
class ExecutionStep;
-class Memory;
class ModelBuilder;
-class PreparedModel;
+class RuntimeMemory;
+class RuntimePreparedModel;
class StepExecutor;
class ExecutionBuilder {
@@ -57,11 +57,11 @@ class ExecutionBuilder {
int setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer,
size_t length);
int setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length);
+ const RuntimeMemory* memory, size_t offset, size_t length);
int setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer,
size_t length);
int setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
- const Memory* memory, size_t offset, size_t length);
+ const RuntimeMemory* memory, size_t offset, size_t length);
int setMeasureTiming(bool measure);
@@ -86,30 +86,29 @@ class ExecutionBuilder {
int burstCompute(BurstBuilder* burst) { return compute(nullptr, burst); }
// Initialize output dimensional information from ModelArgumentInfo.
- std::vector<hal::OutputShape> getInitialOutputShapes() const;
+ std::vector<OutputShape> getInitialOutputShapes() const;
int getOutputOperandDimensions(uint32_t index, uint32_t* dimensions);
int getOutputOperandRank(uint32_t index, uint32_t* rank);
// Handshake with lower-level execution support
bool measureTiming() const { return mMeasureTiming; }
- void reportTimingWithoutFencedExecutionCallback(hal::Timing timing) {
+ void reportTimingWithoutFencedExecutionCallback(Timing timing) {
mTimingWithoutFencedExecutionCallback = timing;
}
const CompilationBuilder* getCompilation() const { return mCompilation; }
const ModelBuilder* getModel() const { return mModel; }
const ModelBuilder* getSourceModel(uint32_t index) const;
- const hal::Operand& getSourceOperand(
- const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
+ const Operand& getSourceOperand(const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
return getSourceModel(sourceOperandIndex.first)->getOperand(sourceOperandIndex.second);
}
- hal::ErrorStatus finishWithoutSyncFence(hal::ErrorStatus error,
- const std::vector<hal::OutputShape>& outputShapes);
+ ErrorStatus finishWithoutSyncFence(ErrorStatus error,
+ const std::vector<OutputShape>& outputShapes);
// Retrieve a reference to the IFencedExecutionCallback callback.
- const sp<hal::IFencedExecutionCallback>& getFencedExecutionCallback() {
+ const sp<V1_3::IFencedExecutionCallback>& getFencedExecutionCallback() {
return mFencedExecutionCallback;
}
@@ -136,8 +135,7 @@ class ExecutionBuilder {
const CompilationBuilder* mCompilation;
// Update output dimensional information from OutputShape to ModelArgumentInfo.
- bool updateOutputShapes(hal::ErrorStatus status,
- const std::vector<hal::OutputShape>& outputShapes);
+ bool updateOutputShapes(ErrorStatus status, const std::vector<OutputShape>& outputShapes);
bool updateMemories();
@@ -153,7 +151,7 @@ class ExecutionBuilder {
// The information we'll send to the driver about the inputs and outputs.
// Note that we build this in two steps:
// 1. As the arguments are specified, set the corresponding mInputs or mOutputs element.
- // If set from a pointer, don't set the location in the RequestArgument but store it
+ // If set from a pointer, don't set the location in the Request::Argument but store it
// instead in mInputBuffers or mOutputBuffers.
// 2. Once we have all the inputs and outputs, if needed, allocate shared memory for
// the m*Buffers entries. Copy the input values into the shared memory.
@@ -169,7 +167,7 @@ class ExecutionBuilder {
// Timing reported from the driver. This field is only used if
// mFencedExecutionCallback is nullptr.
- hal::Timing mTimingWithoutFencedExecutionCallback = {};
+ Timing mTimingWithoutFencedExecutionCallback = {};
// Amount of time to complete or abort the execution.
std::optional<uint64_t> mTimeoutDuration;
@@ -207,7 +205,7 @@ class ExecutionBuilder {
// doesn't support fenced execution (e.g., the driver is too old), or if the
// launch of execution on the driver fails, then this callback will be
// nullptr.
- sp<hal::IFencedExecutionCallback> mFencedExecutionCallback;
+ sp<V1_3::IFencedExecutionCallback> mFencedExecutionCallback;
};
// class StepExecutor is used to execute a single "step" in a
@@ -236,7 +234,8 @@ class StepExecutor {
// of "step" models. Must be nullptr otherwise.
// (step == nullptr) == (dynamicTemporaries == nullptr)
StepExecutor(ExecutionBuilder* executionBuilder, const ModelBuilder* model,
- std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
+ std::shared_ptr<Device> device,
+ std::shared_ptr<RuntimePreparedModel> preparedModel,
const ExecutionStep* step = nullptr,
DynamicTemporaries* dynamicTemporaries = nullptr);
@@ -255,8 +254,8 @@ class StepExecutor {
bool zeroSizedInput; // is at least one output of this execution step a zero-sized tensor
// that needs to be read by some other step of the same execution?
};
- bool updateOutputShapes(int executionResultCode, const std::vector<hal::OutputShape>& from,
- std::vector<hal::OutputShape>* to, UpdateOutputShapes* update);
+ bool updateOutputShapes(int executionResultCode, const std::vector<OutputShape>& from,
+ std::vector<OutputShape>* to, UpdateOutputShapes* update);
// Map inputs and outputs from ExecutionBuilder to StepExecutor,
// one at a time. Note that these are input/output indexes, not
@@ -271,7 +270,7 @@ class StepExecutor {
mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mOutputs[executorIndex]);
}
void mapOutputToInput(uint32_t builderIndex, uint32_t executorIndex,
- const hal::hidl_vec<uint32_t>* outputDimensions) {
+ const Dimensions* outputDimensions) {
mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mInputs[executorIndex],
outputDimensions);
}
@@ -282,33 +281,33 @@ class StepExecutor {
// (i.e., either rank must match, or operand rank must be zero; and for each
// individual dimension, either dimension must match, or operand dimension
// must be zero).
- int setInputFromMemory(uint32_t inputIndex, const Memory* memory, uint32_t offset,
- const hal::hidl_vec<uint32_t>& dimensions = {},
+ int setInputFromMemory(uint32_t inputIndex, const RuntimeMemory* memory, uint32_t offset,
+ const Dimensions& dimensions = {},
std::optional<uint32_t> length = std::nullopt) {
return setInputOrOutputFromMemory(mModel->getInputOperand(inputIndex), memory, offset,
dimensions, length, &mInputs.at(inputIndex));
}
- int setOutputFromMemory(uint32_t outputIndex, const Memory* memory, uint32_t offset,
- const hal::hidl_vec<uint32_t>& dimensions = {},
+ int setOutputFromMemory(uint32_t outputIndex, const RuntimeMemory* memory, uint32_t offset,
+ const Dimensions& dimensions = {},
std::optional<uint32_t> length = std::nullopt) {
return setInputOrOutputFromMemory(mModel->getOutputOperand(outputIndex), memory, offset,
dimensions, length, &mOutputs.at(outputIndex));
}
// Executes using the (driver, preparedModel) specified at construction time.
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> compute(
+ std::tuple<int, std::vector<OutputShape>, Timing> compute(
const std::optional<Deadline>& deadline,
const std::shared_ptr<ExecutionBurstController>& burstController = nullptr);
// Re-compiles and executes using the CPU, regardless of the (driver,
// preparedModel) specified at construction time.
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> computeOnCpuFallback();
+ std::tuple<int, std::vector<OutputShape>, Timing> computeOnCpuFallback();
bool isCpu() const;
// Perform fenced execution and return error_code, sync_fence_fd and a
// callback.
- std::tuple<int, int, sp<hal::IFencedExecutionCallback>> computeFenced(
+ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>> computeFenced(
const std::vector<int>& wait_for, uint64_t timeoutDurationAfterFence,
const std::optional<Deadline>& deadline);
@@ -321,7 +320,7 @@ class StepExecutor {
// specified dimensions.
void mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput,
ModelArgumentInfo* executorInputOrOutput,
- const hal::hidl_vec<uint32_t>* builderDimensions = nullptr);
+ const Dimensions* builderDimensions = nullptr);
// If no length is provided, the input or output is assumed to have the length
// of the corresponding operand. dimensions must either have zero rank or
@@ -329,13 +328,14 @@ class StepExecutor {
// dimensions (i.e., either rank must match, or operand rank must be zero;
// and for each individual dimension, either dimension must match, or
// operand dimension must be zero).
- int setInputOrOutputFromMemory(const hal::Operand& inputOrOutputOperand, const Memory* memory,
- uint32_t offset, const hal::hidl_vec<uint32_t>& dimensions,
+ int setInputOrOutputFromMemory(const Operand& inputOrOutputOperand, const RuntimeMemory* memory,
+ uint32_t offset, const Dimensions& dimensions,
std::optional<uint32_t> length,
ModelArgumentInfo* inputOrOutputInfo);
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> computeWithMemories(
- const std::optional<Deadline>& deadline, const std::vector<const Memory*>& memories,
+ std::tuple<int, std::vector<OutputShape>, Timing> computeWithMemories(
+ const std::optional<Deadline>& deadline,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController = nullptr);
// describes the full (possibly multiple-"step") execution
@@ -351,12 +351,12 @@ class StepExecutor {
// compiled forms; and device on which to execute it
const ModelBuilder* mModel;
std::shared_ptr<Device> mDevice;
- std::shared_ptr<PreparedModel> mPreparedModel;
+ std::shared_ptr<RuntimePreparedModel> mPreparedModel;
// The information we'll send to the driver about the inputs and outputs.
// Note that we build this in two steps:
// 1. As the arguments are specified, set the corresponding mInputs or mOutputs element.
- // If set from a pointer, don't set the location in the RequestArgument but store it
+ // If set from a pointer, don't set the location in the Request::Argument but store it
// instead in mInputBuffers or mOutputBuffers.
// 2. Once we have all the inputs and outputs, if needed, allocate shared memory for
// the m*Buffers entries. Copy the input values into the shared memory.
diff --git a/nn/runtime/ExecutionPlan.cpp b/nn/runtime/ExecutionPlan.cpp
index c3aa61f92..ba6911607 100644
--- a/nn/runtime/ExecutionPlan.cpp
+++ b/nn/runtime/ExecutionPlan.cpp
@@ -58,8 +58,6 @@ namespace nn {
namespace {
-using namespace hal;
-
// The index of the main model in SourceModels.
constexpr uint32_t kMainModelInSourceModels = 0;
@@ -71,7 +69,7 @@ constexpr uint32_t kMainModelInSourceModels = 0;
int compile(const Device& device, const ModelBuilder& model, int executionPreference,
int compilationPriority, const std::optional<Deadline>& deadline,
const std::string& cacheDir, TokenHasher* token,
- std::shared_ptr<PreparedModel>* preparedModel) {
+ std::shared_ptr<RuntimePreparedModel>* preparedModel) {
CHECK(token != nullptr);
CHECK(preparedModel != nullptr);
*preparedModel = nullptr;
@@ -82,12 +80,14 @@ int compile(const Device& device, const ModelBuilder& model, int executionPrefer
token->updateFromString(device.getVersionString().c_str()) &&
token->update(&executionPreference, sizeof(executionPreference)) &&
token->update(&compilationPriority, sizeof(compilationPriority)) && token->finish()) {
- cacheToken.emplace(token->getCacheToken());
+ cacheToken = CacheToken{};
+ const uint8_t* tokenPtr = token->getCacheToken();
+ std::copy(tokenPtr, tokenPtr + cacheToken->size(), cacheToken->begin());
}
- const ModelFactory makeModel = [&model] { return model.makeHidlModel(); };
+ const ModelFactory makeModel = [&model] { return model.makeModel(); };
const ExecutionPreference preference = static_cast<ExecutionPreference>(executionPreference);
- const Priority priority = convertToHalPriority(compilationPriority);
+ const Priority priority = convertToCanonicalPriority(compilationPriority);
const auto [n, returnedPreparedModel] =
device.prepareModel(makeModel, preference, priority, deadline, cacheDir, cacheToken);
*preparedModel = returnedPreparedModel;
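The token handling above replaces an emplace of a raw pointer with an explicit byte copy into the canonical CacheToken. A minimal sketch of that step, assuming CacheToken is the canonical fixed-size byte array (32 bytes in NNAPI) and that a null pointer means hashing failed:

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <optional>

    // Assumption: CacheToken is the canonical fixed-size byte array (32 bytes).
    using CacheToken = std::array<uint8_t, 32>;

    std::optional<CacheToken> makeCacheToken(const uint8_t* tokenPtr) {
        if (tokenPtr == nullptr) return std::nullopt;  // hashing failed: no caching
        CacheToken token;
        std::copy(tokenPtr, tokenPtr + token.size(), token.begin());
        return token;
    }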
@@ -99,27 +99,24 @@ typedef std::function<void(uint32_t)> OperationReadyCallback;
int copyOperandExtraParams(ModelBuilder& model, uint32_t toOperandIndex,
const Operand& fromOperand) {
if (fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
- fromOperand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::channelQuant) {
- auto& fromChannelQuant = fromOperand.extraParams.channelQuant();
+ std::holds_alternative<Operand::SymmPerChannelQuantParams>(fromOperand.extraParams)) {
+ auto& fromChannelQuant =
+ std::get<Operand::SymmPerChannelQuantParams>(fromOperand.extraParams);
ANeuralNetworksSymmPerChannelQuantParams toChannelQuant = {
.channelDim = fromChannelQuant.channelDim,
.scaleCount = static_cast<uint32_t>(fromChannelQuant.scales.size()),
.scales = fromChannelQuant.scales.data(),
};
return model.setOperandSymmPerChannelQuantParams(toOperandIndex, toChannelQuant);
- } else if (isExtensionOperandType(fromOperand.type) &&
- fromOperand.extraParams.getDiscriminator() ==
- OperandExtraParams::hidl_discriminator::extension) {
- hidl_vec<uint8_t> extensionData = fromOperand.extraParams.extension();
+ } else if (isExtension(fromOperand.type) &&
+ std::holds_alternative<Operand::ExtensionParams>(fromOperand.extraParams)) {
+ auto extensionData = std::get<Operand::ExtensionParams>(fromOperand.extraParams);
return model.setOperandExtensionData(toOperandIndex, extensionData.data(),
extensionData.size());
- } else if (fromOperand.extraParams.getDiscriminator() !=
- OperandExtraParams::hidl_discriminator::none ||
+ } else if (!std::holds_alternative<Operand::NoParams>(fromOperand.extraParams) ||
fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- LOG(ERROR) << "Type " << toString(fromOperand.type)
- << " has an unexpected extraParams discriminator: "
- << static_cast<int>(fromOperand.extraParams.getDiscriminator());
+ LOG(ERROR) << "Type " << fromOperand.type
+ << " has an unexpected extraParams variant: " << fromOperand.extraParams.index();
return ANEURALNETWORKS_BAD_DATA;
} else {
return ANEURALNETWORKS_NO_ERROR;
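The rewrite above is the general std::variant replacement for HIDL safe-union access: holds_alternative<T> stands in for the discriminator comparison, get<T> for the per-alternative accessor, and index() for the raw discriminator value in the error log. A trimmed stand-in, with simplified types in place of Operand::ExtraParams:

    #include <cstdint>
    #include <variant>
    #include <vector>

    // Trimmed stand-ins for Operand::NoParams / SymmPerChannelQuantParams /
    // ExtensionParams.
    struct NoParams {};
    struct ChannelQuant {
        uint32_t channelDim;
        std::vector<float> scales;
    };
    using ExtraParams = std::variant<NoParams, ChannelQuant, std::vector<uint8_t>>;

    // holds_alternative<T> replaces getDiscriminator() == ...,
    // get<T> replaces the per-alternative accessor, and
    // params.index() replaces the raw discriminator in error messages.
    uint32_t channelDimOrZero(const ExtraParams& params) {
        if (std::holds_alternative<ChannelQuant>(params)) {
            return std::get<ChannelQuant>(params).channelDim;
        }
        return 0;
    }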
@@ -153,8 +150,8 @@ OperandTracker::OperandTracker(const ModelBuilder* model, OperationReadyCallback
uint32_t count = 0;
for (uint32_t operandIndex : operation.inputs) {
auto lifetime = mModel->getOperand(operandIndex).lifetime;
- if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
count++;
mOperandToOperations.emplace(operandIndex, operationIndex);
}
@@ -193,21 +190,6 @@ std::string toString(SourceOperandIndex sourceOperandIndex) {
std::to_string(sourceOperandIndex.second) + ")";
};
-std::string toString(hidl_vec<uint32_t> dimensions) {
- std::string ret = "(";
- bool wroteOne = false;
- for (uint32_t dimension : dimensions) {
- if (wroteOne) {
- ret += ", ";
- } else {
- wroteOne = true;
- }
- ret += std::to_string(dimension);
- }
- ret += ")";
- return ret;
-};
-
} // namespace
void DynamicTemporaries::vlogDump(const char* context) const {
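The local helper deleted above is redundant because the canonical types library already provides string conversion for dimension vectors. A behaviorally equivalent sketch over std::vector<uint32_t> (the function name here is illustrative):

    #include <cstdint>
    #include <string>
    #include <vector>

    std::string dimensionsToString(const std::vector<uint32_t>& dimensions) {
        std::string ret = "(";
        for (size_t i = 0; i < dimensions.size(); ++i) {
            if (i != 0) ret += ", ";
            ret += std::to_string(dimensions[i]);
        }
        return ret + ")";  // e.g. {2, 3, 4} -> "(2, 3, 4)"
    }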
@@ -227,8 +209,7 @@ void DynamicTemporaries::vlogDump(const char* context) const {
}
void DynamicTemporaries::declare(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex,
- const hidl_vec<uint32_t>& initialDimensions,
- uint32_t initialLength) {
+ const Dimensions& initialDimensions, uint32_t initialLength) {
VLOG(EXECUTION) << "DynamicTemporaries::declare(sourceOperandIndex = "
<< toString(sourceOperandIndex) << ", stepIndex = " << stepIndex
<< ", initialDimensions = " << toString(initialDimensions)
@@ -243,7 +224,7 @@ void DynamicTemporaries::declare(SourceOperandIndex sourceOperandIndex, uint32_t
}
bool DynamicTemporaries::redeclare(SourceOperandIndex sourceOperandIndex,
- const hidl_vec<uint32_t>& newDimensions, uint32_t newLength) {
+ const Dimensions& newDimensions, uint32_t newLength) {
auto createAndLogResult = [sourceOperandIndex, &newDimensions, newLength](bool changedShape) {
VLOG(EXECUTION) << "DynamicTemporaries::redeclare(sourceOperandIndex = "
<< toString(sourceOperandIndex)
@@ -389,31 +370,19 @@ int ExecutionStep::addOperand(uint32_t sourceOperandIndex, uint32_t* stepOperand
// Sets its value.
switch (operand.lifetime) {
- case OperandLifeTime::CONSTANT_COPY: {
+ case Operand::LifeTime::CONSTANT_COPY: {
const uint8_t* data = sourceModel.getPointerToOperandValue(operand.location.offset);
n = mStepModel.setOperandValue(*stepOperandIndex, data, operand.location.length);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- case OperandLifeTime::CONSTANT_REFERENCE: {
- const Memory* memory = sourceModel.getMemories()[operand.location.poolIndex];
+ case Operand::LifeTime::CONSTANT_REFERENCE: {
+ const RuntimeMemory* memory = sourceModel.getMemories()[operand.location.poolIndex];
n = mStepModel.setOperandValueFromMemory(
*stepOperandIndex, memory, operand.location.offset, operand.location.length);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- case OperandLifeTime::NO_VALUE: {
+ case Operand::LifeTime::NO_VALUE: {
n = mStepModel.setOperandValue(*stepOperandIndex, nullptr, 0);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- case OperandLifeTime::TEMPORARY_VARIABLE: { // handled similarly to SUBGRAPH_OUTPUT
+ case Operand::LifeTime::TEMPORARY_VARIABLE: { // handled similarly to SUBGRAPH_OUTPUT
if (kind == INPUT) {
// The first time we've seen this operand is as an
// input. That means it must be defined by a
@@ -427,10 +396,10 @@ int ExecutionStep::addOperand(uint32_t sourceOperandIndex, uint32_t* stepOperand
mIndex);
}
} break;
- case OperandLifeTime::SUBGRAPH_INPUT: {
+ case Operand::LifeTime::SUBGRAPH_INPUT: {
mModelInputs.emplace_back(sourceOperandIndex, *stepOperandIndex);
} break;
- case OperandLifeTime::SUBGRAPH_OUTPUT: { // handled similarly to TEMPORARY_VARIABLE
+ case Operand::LifeTime::SUBGRAPH_OUTPUT: { // handled similarly to TEMPORARY_VARIABLE
if (kind == INPUT) {
// The first time we've seen this operand is as an
// input. That means it must be defined by a
@@ -446,20 +415,20 @@ int ExecutionStep::addOperand(uint32_t sourceOperandIndex, uint32_t* stepOperand
mIndex);
}
} break;
- case OperandLifeTime::SUBGRAPH: {
+ case Operand::LifeTime::SUBGRAPH: {
const ModelBuilder* model = sourceModel.getReferencedModel(operand);
n = mStepModel.setOperandValueFromModel(*stepOperandIndex, model);
- if (n != ANEURALNETWORKS_NO_ERROR) {
- LOG(ERROR) << "Previous error occurred when partitioning the graph";
- return n;
- }
} break;
- default: {
- CHECK(!"unexpected");
+ case Operand::LifeTime::POINTER: {
+ const void* data = std::get<const void*>(operand.location.pointer);
+ n = mStepModel.setOperandValue(*stepOperandIndex, data, operand.location.length);
} break;
}
- return ANEURALNETWORKS_NO_ERROR;
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ LOG(ERROR) << "Previous error occurred when partitioning the graph";
+ }
+ return n;
}
int ExecutionStep::addOperation(int operationIndex) {
@@ -477,7 +446,7 @@ int ExecutionStep::addOperation(int operationIndex) {
// constant, or an operand written by a different partition.
//
// - We should not have seen any outputs.
- auto addOperands = [this](const hidl_vec<uint32_t>& sourceModelOperands,
+ auto addOperands = [this](const std::vector<uint32_t>& sourceModelOperands,
std::vector<uint32_t>* stepModelOperands, OperandKind kind) -> int {
const uint32_t operandCount = static_cast<uint32_t>(sourceModelOperands.size());
for (uint32_t i = 0; i < operandCount; i++) {
@@ -498,7 +467,7 @@ int ExecutionStep::addOperation(int operationIndex) {
void ExecutionStep::mapInputsAndOutputs(
std::shared_ptr<StepExecutor> executor,
- const std::vector<hal::OutputShape>* mainModelOutputShapes, const Memory* temporaryMemory,
+ const std::vector<OutputShape>* mainModelOutputShapes, const RuntimeMemory* temporaryMemory,
const std::map<SourceOperandIndex, uint32_t>& sourceOperandToOffsetOfTemporary,
const DynamicTemporaries& dynamicTemporaries,
const std::map<SourceOperandIndex, uint32_t>& sourceOperandToInputIndex,
@@ -674,10 +643,10 @@ void ExecutionStep::logStepModel() const {
}
static bool hasUnknownSize(const Operand& operand) {
- if (operand.dimensions.size() == 0) {
+ if (operand.dimensions.empty()) {
return TypeManager::get()->isTensorType(operand.type);
}
- for (uint32_t dimension : operand.dimensions) {
+ for (const Dimension& dimension : operand.dimensions) {
if (dimension == 0) {
return true;
}
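hasUnknownSize() encodes a rule worth spelling out: a tensor operand's size is unknown either when its rank is unspecified (dimensions is empty) or when any extent is 0, the NNAPI marker for "resolved at execution time". A standalone restatement:

    #include <cstdint>
    #include <vector>

    // A shape is "unknown" if the rank is unspecified (empty) or any extent is 0.
    bool shapeIsUnknown(const std::vector<uint32_t>& dimensions, bool isTensorType) {
        if (dimensions.empty()) return isTensorType;  // scalars have no dimensions
        for (uint32_t d : dimensions) {
            if (d == 0) return true;  // 0 marks a dimension resolved at run time
        }
        return false;
    }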
@@ -693,8 +662,8 @@ int ExecutionStep::finishStepModel(const ModelBuilder* mainModel, bool* hasOutpu
const Operand& operand = mStepModel.getOperand(stepModelOutput.second);
if (hasUnknownSize(operand)) {
*hasOutputOfUnknownSize = true;
- VLOG(COMPILATION) << "StepModelOutput (operand#" << toString(stepModelOutput.first)
- << " of source graph) has unknown size: " << toString(operand);
+ VLOG(COMPILATION) << "StepModelOutput (operand#" << stepModelOutput.first
+ << " of source graph) has unknown size: " << operand;
}
}
@@ -779,38 +748,32 @@ int ExecutionStep::finishStepModel(const ModelBuilder* mainModel, bool* hasOutpu
void ExecutionStep::dump() const {
if (VLOG_IS_ON(COMPILATION)) {
VLOG(COMPILATION) << "Step#" << mIndex << ": execute on " << mDevice->getName();
- logModelToInfo(mStepModel.makeHidlModel());
+ logModelToInfo(mStepModel.makeModel());
}
}
-std::string toString(const IfStep& step) {
- std::ostringstream oss;
- oss << "Step#" << step.index << ": if " << toString(step.conditionOperandIndex)
- << " then=" << step.thenStepIndex << " else=" << step.elseStepIndex;
- return oss.str();
+std::ostream& operator<<(std::ostream& os, const IfStep& step) {
+ return os << "Step#" << step.index << ": if " << toString(step.conditionOperandIndex)
+ << " then=" << step.thenStepIndex << " else=" << step.elseStepIndex;
}
-std::string toString(const WhileStep& step) {
- std::ostringstream oss;
- oss << "Step#" << step.index << ": while cond=" << step.condStepIndex
- << " body=" << step.bodyStepIndex << " exit=" << step.exitStepIndex;
- return oss.str();
+std::ostream& operator<<(std::ostream& os, const WhileStep& step) {
+ return os << "Step#" << step.index << ": while cond=" << step.condStepIndex
+ << " body=" << step.bodyStepIndex << " exit=" << step.exitStepIndex;
}
-std::string toString(const GotoStep& step) {
- std::ostringstream oss;
- oss << "Step#" << step.index << ": goto " << step.gotoStepIndex;
- return oss.str();
+std::ostream& operator<<(std::ostream& os, const GotoStep& step) {
+ return os << "Step#" << step.index << ": goto " << step.gotoStepIndex;
}
void LogicalStep::dump() const {
if (VLOG_IS_ON(COMPILATION)) {
if (const IfStep* step = tryIfStep()) {
- VLOG(COMPILATION) << toString(*step);
+ VLOG(COMPILATION) << *step;
} else if (const WhileStep* step = tryWhileStep()) {
- VLOG(COMPILATION) << toString(*step);
+ VLOG(COMPILATION) << *step;
} else if (const GotoStep* step = tryGotoStep()) {
- VLOG(COMPILATION) << toString(*step);
+ VLOG(COMPILATION) << *step;
} else {
executionStep()->dump();
}
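Switching from toString() to operator<< removes the intermediate std::ostringstream and lets steps stream directly into the VLOG sinks above. The pattern, reduced to its core (DemoStep is an illustrative type):

    #include <iostream>

    struct DemoStep {
        int index;
        int target;
    };

    // Returning the stream makes the operator compose with any ostream sink,
    // including the VLOG streams used above.
    std::ostream& operator<<(std::ostream& os, const DemoStep& step) {
        return os << "Step#" << step.index << ": goto " << step.target;
    }

    // Usage: std::cout << DemoStep{3, 7} << '\n';  // prints "Step#3: goto 7"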
@@ -897,12 +860,17 @@ void ExecutionPlan::CompoundBody::findControlFlowBoundaryConstants(
const ModelBuilder* sourceModel = sourceModels->getModel(sourceOperandIndex.first);
const Operand& operand = sourceModel->getOperand(sourceOperandIndex.second);
const DataLocation& location = operand.location;
- if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
mSourceOperandToBoundaryConstantCopy[sourceOperandIndex] = {
.buffer = sourceModel->getPointerToOperandValue(location.offset),
.length = location.length,
};
- } else if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ } else if (operand.lifetime == Operand::LifeTime::POINTER) {
+ mSourceOperandToBoundaryConstantCopy[sourceOperandIndex] = {
+ .buffer = static_cast<const uint8_t*>(std::get<const void*>(location.pointer)),
+ .length = location.length,
+ };
+ } else if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE) {
mSourceOperandToBoundaryConstantReference[sourceOperandIndex] = {
.memory = sourceModel->getMemories()[location.poolIndex],
.offset = location.offset,
@@ -1043,7 +1011,7 @@ std::shared_ptr<ExecutionPlan::Controller> ExecutionPlan::makeController(
if (mState == SIMPLE) {
return std::shared_ptr<Controller>(new Controller(this, executionBuilder, burstBuilder));
}
- // Create the layout for a Memory object big enough to hold
+ // Create the layout for a RuntimeMemory object big enough to hold
// - every partition boundary TEMPORARY operand that is not a dynamic temporary, and
// - buffers required by the control flow implementation.
//
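The layout described above is computed by a simple bump allocator: each boundary temporary gets an aligned offset, and the running total becomes the size of the shared memory to map. A sketch of that helper, with an illustrative alignment constant rather than the runtime's exact value:

    #include <cstdint>

    // Illustrative alignment; the runtime's actual constant may differ.
    constexpr uint32_t kAlignment = 64;

    // Round the running total up to the alignment, hand out that offset, and
    // grow the total; after the last call, *totalSize is the pool size to map.
    uint32_t addTemporaryOfSize(uint32_t* totalSize, uint32_t size) {
        *totalSize = (*totalSize + kAlignment - 1) & ~(kAlignment - 1);
        const uint32_t offset = *totalSize;
        *totalSize += size;
        return offset;
    }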
@@ -1078,17 +1046,17 @@ std::shared_ptr<ExecutionPlan::Controller> ExecutionPlan::makeController(
[executionBuilder, &totalSizeOfTemporaries](
const SourceOperandIndex& sourceOperandIndex,
std::map<SourceOperandIndex, uint32_t>* sourceOperandToOffsetOfTemporary,
- OperandLifeTime lifetime = OperandLifeTime::TEMPORARY_VARIABLE) {
- CHECK(lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- lifetime == OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime lifetime = Operand::LifeTime::TEMPORARY_VARIABLE) {
+ CHECK(lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT);
const Operand& sourceOperand =
executionBuilder->getSourceOperand(sourceOperandIndex);
- if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE &&
- sourceOperand.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE &&
+ sourceOperand.lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
// See the caller for explanation.
return;
}
- CHECK(sourceOperand.lifetime == lifetime);
+ CHECK_EQ(sourceOperand.lifetime, lifetime);
const uint32_t size = TypeManager::get()->getSizeOfData(sourceOperand);
if (size != 0u) {
const uint32_t offset = addTemporaryOfSize(&totalSizeOfTemporaries, size);
@@ -1100,8 +1068,8 @@ std::shared_ptr<ExecutionPlan::Controller> ExecutionPlan::makeController(
} else {
// Unknown size, hence dynamic temporary. The mapping will
// be established elsewhere (DynamicTemporaries::allocate()).
- CHECK(lifetime == OperandLifeTime::TEMPORARY_VARIABLE);
- CHECK(sourceOperand.lifetime == OperandLifeTime::TEMPORARY_VARIABLE);
+ CHECK_EQ(lifetime, Operand::LifeTime::TEMPORARY_VARIABLE);
+ CHECK_EQ(sourceOperand.lifetime, Operand::LifeTime::TEMPORARY_VARIABLE);
}
};
std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary;
@@ -1170,15 +1138,15 @@ std::shared_ptr<ExecutionPlan::Controller> ExecutionPlan::makeController(
// so (b/148206073).
for (const auto& sourceOperandIndex : step->bodyOutputOperands) {
mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary,
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime::SUBGRAPH_OUTPUT);
// Allocate another set of temporaries for double buffering.
mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary2,
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime::SUBGRAPH_OUTPUT);
}
// Allocate memory for condition model output.
// TODO: Share one condition output memory region between all loops.
mapTemporary(step->condOutputOperand, &sourceOperandToOffsetOfTemporary,
- OperandLifeTime::SUBGRAPH_OUTPUT);
+ Operand::LifeTime::SUBGRAPH_OUTPUT);
} else {
CHECK(logicalStep->isGoto());
}
@@ -1245,7 +1213,7 @@ int ExecutionPlan::fallback(std::shared_ptr<Controller> controller,
}
ExecutionPlan::Buffer::Buffer(void* pointer, uint32_t size)
- : mInfo(RunTimePoolInfo::createFromExistingBuffer(reinterpret_cast<uint8_t*>(pointer), size)),
+ : mInfo(RunTimePoolInfo::createFromExistingBuffer(static_cast<uint8_t*>(pointer), size)),
mOffset(0) {}
ExecutionPlan::Buffer::Buffer(RunTimePoolInfo info, uint32_t offset)
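The cast change above works because converting void* to uint8_t* is an ordinary static conversion; static_cast is the narrowest cast that does the job, and reserving reinterpret_cast for genuinely type-punning sites keeps those easier to audit. A minimal illustration:

    #include <cstdint>

    void demo(void* pointer) {
        // void* -> uint8_t* needs no reinterpretation of the bit pattern,
        // so static_cast is the right tool here.
        uint8_t* bytes = static_cast<uint8_t*>(pointer);
        (void)bytes;
    }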
@@ -1515,7 +1483,7 @@ int ExecutionPlan::nextCompound(const IfStep* step, std::shared_ptr<Controller>
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
const std::vector<OutputShape>* mainModelOutputShapes) const {
- VLOG(EXECUTION) << "next: " << toString(*step);
+ VLOG(EXECUTION) << "next: " << *step;
// If the last step has a sync fence, wait for it to signal before reading the condition value.
// This is safe because the steps are serialized when doing fenced compute.
NN_RETURN_IF_ERROR(controller->waitForLastStepSyncFence());
@@ -1558,7 +1526,7 @@ int ExecutionPlan::nextCompound(const WhileStep* step, std::shared_ptr<Controlle
WhileState& state = controller->mWhileState[controller->mNextStepIndex];
if (state.stage == WhileState::EVALUATE_CONDITION) {
state.iteration = state.iteration == WhileState::kOutsideLoop ? 0 : state.iteration + 1;
- VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration
+ VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration
<< ": evaluating condition";
controller->mNextStepIndex = step->condStepIndex;
@@ -1602,7 +1570,7 @@ int ExecutionPlan::nextCompound(const WhileStep* step, std::shared_ptr<Controlle
bool condValue;
NN_RETURN_IF_ERROR(readConditionValue(controller, step->condOutputOperand, &condValue));
if (condValue) {
- VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration
+ VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration
<< ": evaluating body";
controller->mNextStepIndex = step->bodyStepIndex;
@@ -1632,7 +1600,7 @@ int ExecutionPlan::nextCompound(const WhileStep* step, std::shared_ptr<Controlle
}
}
} else {
- VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration
+ VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration
<< ": exiting loop";
controller->mNextStepIndex = step->exitStepIndex;
@@ -1677,7 +1645,7 @@ int ExecutionPlan::nextCompound(const GotoStep* step, std::shared_ptr<Controller
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
const std::vector<OutputShape>* mainModelOutputShapes) const {
- VLOG(EXECUTION) << "next: " << toString(*step);
+ VLOG(EXECUTION) << "next: " << *step;
controller->mNextStepIndex = step->gotoStepIndex;
return nextCompound(controller, executor, burstController, mainModelOutputShapes);
}
@@ -1905,7 +1873,7 @@ int ModelBuilder::partitionTheWork(const std::vector<std::shared_ptr<Device>>& d
int n = plan->finish(preference, priority, deadline, simulateFailureResultCode);
if (VLOG_IS_ON(COMPILATION)) {
VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: source model: ";
- logModelToInfo(makeHidlModel());
+ logModelToInfo(makeModel());
plan->dump();
}
return n;
@@ -2148,7 +2116,7 @@ int ModelBuilder::partitionTheWorkInternal(uint32_t sourceModelIndex,
bodyModelIndex, bodyModel->getOutputOperandIndex(i));
}
} else {
- CHECK(false) << toString(operation.type) << " is not a control flow operation";
+ CHECK(false) << operation.type << " is not a control flow operation";
}
tracker.markProcessed(operationIndex, enqueueOnAppropriateDevice);
}
@@ -2176,7 +2144,7 @@ float ModelBuilder::getPerformance(uint32_t preference,
float ModelBuilder::getPerformance(uint32_t preference, const std::shared_ptr<Device> device,
uint32_t operationIndex) const {
- auto applyPreference = [preference](const PerformanceInfo& perf) {
+ auto applyPreference = [preference](const Capabilities::PerformanceInfo& perf) {
return preference == ANEURALNETWORKS_PREFER_LOW_POWER ? perf.powerUsage : perf.execTime;
};
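The lambda above just selects one axis of the performance pair: both fields are ratios against the CPU baseline (lower is better), and the preference decides which axis matters. A reduced sketch with a stand-in for Capabilities::PerformanceInfo:

    // Stand-in for Capabilities::PerformanceInfo.
    struct PerfInfo {
        float execTime;    // ratio vs. CPU; lower is better
        float powerUsage;  // ratio vs. CPU; lower is better
    };

    float applyPreference(bool preferLowPower, const PerfInfo& perf) {
        return preferLowPower ? perf.powerUsage : perf.execTime;
    }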
@@ -2300,7 +2268,7 @@ class CanDo {
int ModelBuilder::findBestDeviceForEachOperation(
uint32_t preference, const std::vector<std::shared_ptr<Device>>& devices,
std::vector<int>* bestDeviceForOperation) const {
- const MetaModel metaModel(makeHidlModel(), DeviceManager::get()->strictSlicing());
+ const MetaModel metaModel(makeModel(), DeviceManager::get()->strictSlicing());
const size_t deviceCount = devices.size();
std::vector<CanDo> canDo(deviceCount);
@@ -2345,13 +2313,13 @@ int ModelBuilder::findBestDeviceForEachOperation(
// Logs O(operationCount * deviceCount) times, but typically deviceCount is
// very small.
VLOG(COMPILATION) << "Device " << device->getName() << " can't do operation "
- << toString(operation.type);
+ << operation.type;
}
}
}
if (bestChoice < 0) {
- LOG(ERROR) << "No driver can do operation " << toString(operation.type);
+ LOG(ERROR) << "No driver can do operation " << operation.type;
return ANEURALNETWORKS_BAD_DATA;
} else if (devices[bestChoice] == DeviceManager::getCpuDevice() &&
supportedByControlFlowInterpreter(operationIndex)) {
@@ -2359,15 +2327,13 @@ int ModelBuilder::findBestDeviceForEachOperation(
// to delegate referenced models.
const int kControlFlowInterpreter = deviceCount;
(*bestDeviceForOperation)[operationIndex] = kControlFlowInterpreter;
- VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation("
- << toString(operation.type) << ":" << operationIndex << ") = -1"
- << " (NNAPI)";
+ VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" << operation.type
+                          << ":" << operationIndex << ") = -1 (NNAPI)";
} else {
(*bestDeviceForOperation)[operationIndex] = bestChoice;
- VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation("
- << toString(operation.type) << ":" << operationIndex
- << ") = " << bestChoice << " (" << devices[bestChoice]->getName()
- << ")";
+ VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" << operation.type
+ << ":" << operationIndex << ") = " << bestChoice << " ("
+ << devices[bestChoice]->getName() << ")";
}
}
return ANEURALNETWORKS_NO_ERROR;
diff --git a/nn/runtime/ExecutionPlan.h b/nn/runtime/ExecutionPlan.h
index 740912d8e..097fbd600 100644
--- a/nn/runtime/ExecutionPlan.h
+++ b/nn/runtime/ExecutionPlan.h
@@ -20,6 +20,7 @@
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_PLAN_H
#include <android-base/logging.h>
+#include <nnapi/Types.h>
#include <openssl/sha.h>
#include <algorithm>
@@ -35,7 +36,6 @@
#include <variant>
#include <vector>
-#include "HalInterfaces.h"
#include "Memory.h"
#include "ModelArgumentInfo.h"
#include "ModelBuilder.h"
@@ -52,8 +52,8 @@ class Device;
class ExecutionBuilder;
class ExecutionBurstController;
class ExecutionPlan;
-class Memory;
-class PreparedModel;
+class RuntimeMemory;
+class RuntimePreparedModel;
class StepExecutor;
struct ConstantReferenceLocation;
@@ -142,7 +142,7 @@ class DynamicTemporaries {
// operand). initialDimensions and initialLength indicate what we know or
// (in the case of length) guess about those properties.
void declare(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex,
- const hal::hidl_vec<uint32_t>& initialDimensions, uint32_t initialLength);
+ const Dimensions& initialDimensions, uint32_t initialLength);
// Indicate that we've finished declaring all dynamic temporaries.
void endDeclarations() {
@@ -153,8 +153,8 @@ class DynamicTemporaries {
// Redeclare a dynamic temporary, indicating what we've learned about it.
// This may invalidate the location of temporaries defined by its step.
// Returns true if dimensions or length changed, false otherwise.
- bool redeclare(SourceOperandIndex sourceOperandIndex,
- const hal::hidl_vec<uint32_t>& newDimensions, uint32_t newLength);
+ bool redeclare(SourceOperandIndex sourceOperandIndex, const Dimensions& newDimensions,
+ uint32_t newLength);
// Ensure that all dynamic temporaries defined by the specified step have
// locations. The return value is a ResultCode (e.g.,
@@ -180,9 +180,9 @@ class DynamicTemporaries {
// - If mustBeAllocated == true, then trigger a failed CHECK().
// - If mustBeAllocated == false, then memory == nullptr and offset == ~0.
struct LocationAndShape {
- const Memory* memory;
+ const RuntimeMemory* memory;
uint32_t offset;
- const hal::hidl_vec<uint32_t>* dimensions;
+ const Dimensions* dimensions;
uint32_t length;
};
std::optional<LocationAndShape> lookup(SourceOperandIndex sourceOperandIndex,
@@ -197,7 +197,7 @@ class DynamicTemporaries {
struct InternalLocationAndShape {
uint32_t stepIndex;
uint32_t offset;
- hal::hidl_vec<uint32_t> dimensions;
+ Dimensions dimensions;
uint32_t length;
};
std::map<SourceOperandIndex, InternalLocationAndShape> mSourceOperandToTemporary;
@@ -267,7 +267,9 @@ class ExecutionStep {
std::shared_ptr<Device> getDevice() const { return mDevice; }
// only available after calling finishStepModel()
- std::shared_ptr<PreparedModel> getPreparedStepModel() const { return mPreparedStepModel; }
+ std::shared_ptr<RuntimePreparedModel> getPreparedStepModel() const {
+ return mPreparedStepModel;
+ }
// Map inputs and outputs from ExecutionBuilder to StepExecutor.
//
@@ -278,8 +280,8 @@ class ExecutionStep {
// inputs of this step are of fully specified shape.
void mapInputsAndOutputs(
std::shared_ptr<StepExecutor> stepExecutor,
- const std::vector<hal::OutputShape>* mainModelOutputShapes,
- const Memory* temporaryMemory, // for static temporaries
+ const std::vector<OutputShape>* mainModelOutputShapes,
+ const RuntimeMemory* temporaryMemory, // for static temporaries
const std::map<SourceOperandIndex, uint32_t>&
sourceOperandToOffsetOfTemporary, // for static temporaries
const DynamicTemporaries& dynamicTemporaries,
@@ -306,7 +308,7 @@ class ExecutionStep {
uint32_t mSourceModelIndex;
ModelBuilder mStepModel; // An excerpt of a source model to be run by one device.
std::shared_ptr<Device> mDevice;
- std::shared_ptr<PreparedModel> mPreparedStepModel;
+ std::shared_ptr<RuntimePreparedModel> mPreparedStepModel;
// All inputs of this step model:
// (source model operand index, step model operand index)
@@ -510,9 +512,9 @@ class LogicalStep {
std::variant<ExecutionStep, IfStep, WhileStep, GotoStep> mStep;
};
-std::string toString(const IfStep& step);
-std::string toString(const WhileStep& step);
-std::string toString(const GotoStep& step);
+std::ostream& operator<<(std::ostream& os, const IfStep& step);
+std::ostream& operator<<(std::ostream& os, const WhileStep& step);
+std::ostream& operator<<(std::ostream& os, const GotoStep& step);
// Describes the state of WhileStep.
struct WhileState {
@@ -533,7 +535,7 @@ struct ConstantCopyLocation {
};
struct ConstantReferenceLocation {
- const Memory* memory;
+ const RuntimeMemory* memory;
uint32_t offset;
uint32_t length;
};
@@ -660,13 +662,13 @@ class ExecutionPlan {
// syncFdOfLastStep is the sync fence fd generated by the most recently processed step.
int next(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes,
+ const std::vector<OutputShape>* mainModelOutputShapes,
int syncFdOfLastStep = -1) const;
// Create the same executor as the last one created by next().
int fallback(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
ExecutionStep* createNewExecutionStep(uint32_t sourceModelIndex,
const std::shared_ptr<Device> device);
@@ -737,8 +739,7 @@ class ExecutionPlan {
// Illegal to call for when mState == SIMPLE.
void becomeCompoundIfEmpty();
- const hal::Operand& getSourceOperand(
- const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
+ const Operand& getSourceOperand(const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const {
return getSourceModels()
.getModel(sourceOperandIndex.first)
->getOperand(sourceOperandIndex.second);
@@ -770,23 +771,23 @@ class ExecutionPlan {
int nextCompound(std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const ExecutionStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const IfStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const WhileStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
int nextCompound(const GotoStep* step, std::shared_ptr<Controller> controller,
std::shared_ptr<StepExecutor>* executor,
std::shared_ptr<ExecutionBurstController>* burstController,
- const std::vector<hal::OutputShape>* mainModelOutputShapes) const;
+ const std::vector<OutputShape>* mainModelOutputShapes) const;
struct Body {
virtual ~Body() {}
@@ -818,7 +819,7 @@ class ExecutionPlan {
std::shared_ptr<Device> mDevice;
const ModelBuilder* mModel;
- std::shared_ptr<PreparedModel> mPreparedModel;
+ std::shared_ptr<RuntimePreparedModel> mPreparedModel;
const std::string* mCacheDir;
TokenHasher mToken;
@@ -862,7 +863,8 @@ class ExecutionPlan {
// to initialize ExecutionPlan::Controller::mSourceOperandToOutputIndex;
std::map<SourceOperandIndex, uint32_t> mSourceOperandToOutputIndex;
- // Map from source operand index to location of a CONSTANT_COPY operand.
+ // Map from source operand index to location of a CONSTANT_COPY or
+ // POINTER operand.
// This map only contains constant partition boundary IF and WHILE
// operands and is used to create a ExecutionPlan::Controller.
std::map<SourceOperandIndex, ConstantCopyLocation> mSourceOperandToBoundaryConstantCopy;
@@ -887,9 +889,9 @@ class ExecutionPlan {
// values with the corresponding SUBGRAPH_INPUT operands in a referenced
// model.
//
- // For CONSTANT_COPY boundary operands, we copy those to temporary
- // memory and treat them similarly to TEMPORARY_VARIABLE operands in
- // Controller.
+ // For CONSTANT_COPY and POINTER boundary operands, we copy those to
+ // temporary memory and treat them similarly to TEMPORARY_VARIABLE
+ // operands in Controller.
//
// For CONSTANT_REFERENCE boundary operands, we keep track of them in
// ExecutionPlan::Controller::mSourceOperandToConstantReference.
@@ -923,7 +925,7 @@ class ExecutionPlan {
return static_cast<const CompoundBody*>(mBody);
}
- void forEachDynamicTemporary(const std::function<void(SourceOperandIndex, const hal::Operand&,
+ void forEachDynamicTemporary(const std::function<void(SourceOperandIndex, const Operand&,
uint32_t definingStepIndex)>&) const;
// Pointers to compilation caching information in CompilationBuilder.
diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp
index 78d7c36a9..90d58e490 100644
--- a/nn/runtime/Manager.cpp
+++ b/nn/runtime/Manager.cpp
@@ -47,17 +47,13 @@
namespace android {
namespace nn {
-using namespace hal;
-
-const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-
// A Device with actual underlying driver
class DriverDevice : public Device {
public:
// Create a DriverDevice from a name and a DeviceFactory function.
// Returns nullptr on failure.
static std::shared_ptr<DriverDevice> create(const std::string& name,
- const DeviceFactory& makeDevice);
+ const HalDeviceFactory& makeDevice);
// Prefer using DriverDevice::create
DriverDevice(std::shared_ptr<VersionedIDevice> device);
@@ -70,25 +66,20 @@ class DriverDevice : public Device {
return kInterface->getSupportedExtensions();
}
std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override;
- PerformanceInfo getPerformance(OperandType type) const override {
- const auto& capabilities = kInterface->getCapabilities();
- return lookup(capabilities.operandPerformance, type);
+ Capabilities::PerformanceInfo getPerformance(OperandType type) const override {
+ return kInterface->getCapabilities().operandPerformance.lookup(type);
}
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.relaxedFloat32toFloat16PerformanceScalar;
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
+ return kInterface->getCapabilities().relaxedFloat32toFloat16PerformanceScalar;
}
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.relaxedFloat32toFloat16PerformanceTensor;
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
+ return kInterface->getCapabilities().relaxedFloat32toFloat16PerformanceTensor;
}
- PerformanceInfo getIfPerformance() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.ifPerformance;
+ Capabilities::PerformanceInfo getIfPerformance() const override {
+ return kInterface->getCapabilities().ifPerformance;
}
- PerformanceInfo getWhilePerformance() const override {
- const auto& capabilities = kInterface->getCapabilities();
- return capabilities.whilePerformance;
+ Capabilities::PerformanceInfo getWhilePerformance() const override {
+ return kInterface->getCapabilities().whilePerformance;
}
bool isCachingSupported() const override {
// Caching is supported if either of numModelCache or numDataCache is greater than 0.
@@ -98,13 +89,13 @@ class DriverDevice : public Device {
}
int wait() const override { return kInterface->wait(); }
- std::pair<int, std::shared_ptr<PreparedModel>> prepareModel(
+ std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
const std::optional<CacheToken>& maybeToken) const override;
- std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc,
- hal::OperandType) const override;
+ std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
+ OperandType) const override;
private:
const std::shared_ptr<VersionedIDevice> kInterface;
@@ -117,8 +108,8 @@ class DriverDevice : public Device {
#endif // NN_DEBUGGABLE
};
-// A PreparedModel with underlying IPreparedModel instance return by actual driver.
-class DriverPreparedModel : public PreparedModel {
+// A RuntimePreparedModel with an underlying IPreparedModel instance returned by the actual driver.
+class DriverPreparedModel : public RuntimePreparedModel {
public:
DriverPreparedModel(const Device* device,
const std::shared_ptr<VersionedIPreparedModel>& preparedModel)
@@ -134,18 +125,18 @@ class DriverPreparedModel : public PreparedModel {
std::tuple<int, std::vector<OutputShape>, Timing> execute(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const override;
- std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
+ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& waitFor,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
std::shared_ptr<ExecutionBurstController> configureExecutionBurst(
bool preferPowerOverLatency) const override {
@@ -169,7 +160,7 @@ DriverDevice::DriverDevice(std::shared_ptr<VersionedIDevice> device)
}
std::shared_ptr<DriverDevice> DriverDevice::create(const std::string& name,
- const DeviceFactory& makeDevice) {
+ const HalDeviceFactory& makeDevice) {
CHECK(makeDevice != nullptr);
std::shared_ptr<VersionedIDevice> device = VersionedIDevice::create(name, makeDevice);
if (device == nullptr) {
@@ -187,10 +178,10 @@ std::vector<bool> DriverDevice::getSupportedOperations(const MetaModel& metaMode
std::vector<bool> supportedOperations;
std::tie(status, supportedOperations) = kInterface->getSupportedOperations(metaModel);
- const Model& hidlModel = metaModel.getModel();
- const uint32_t operationCount = hidlModel.main.operations.size();
+ const Model& model = metaModel.getModel();
+ const uint32_t operationCount = model.main.operations.size();
if (status != ErrorStatus::NONE) {
- LOG(ERROR) << "IDevice::getSupportedOperations returned the error " << toString(status);
+ LOG(ERROR) << "IDevice::getSupportedOperations returned the error " << status;
// Set the supported operation vectors to all false, so we won't use this driver.
return std::vector<bool>(operationCount, false);
}
@@ -213,17 +204,18 @@ std::vector<bool> DriverDevice::getSupportedOperations(const MetaModel& metaMode
}
uint32_t accumulator = baseAccumulator;
- const Operation& operation = hidlModel.main.operations[operationIndex];
+ const Operation& operation = model.main.operations[operationIndex];
accumulator ^= static_cast<uint32_t>(operation.type);
- auto accumulateOperands = [&hidlModel, &accumulator](const hidl_vec<uint32_t>& operands) {
+ auto accumulateOperands = [&model, &accumulator](const std::vector<uint32_t>& operands) {
for (uint32_t operandIndex : operands) {
- const Operand& operand = hidlModel.main.operands[operandIndex];
+ const Operand& operand = model.main.operands[operandIndex];
accumulator ^= static_cast<uint32_t>(operand.type);
accumulator ^= operand.dimensions.size();
- for (uint32_t dimension : operand.dimensions) {
+ for (const Dimension& dimension : operand.dimensions) {
accumulator ^= dimension;
- if (operand.lifetime == OperandLifeTime::CONSTANT_COPY ||
- operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY ||
+ operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE ||
+ operand.lifetime == Operand::LifeTime::POINTER) {
accumulator ^= 1;
}
}
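The loop above computes a cheap structural fingerprint, XOR-folding the operation type with each operand's type, rank, extents, and constness; it feeds the debug-only support-override path in this file (note the NN_DEBUGGABLE guard earlier in the class). A distilled sketch with stand-in types:

    #include <cstdint>
    #include <vector>

    // Stand-in for the operand fields the real loop touches.
    struct DemoOperand {
        uint32_t type;
        std::vector<uint32_t> dimensions;
        bool isConstant;
    };

    uint32_t fingerprint(uint32_t operationType, const std::vector<DemoOperand>& operands) {
        uint32_t accumulator = operationType;
        for (const DemoOperand& operand : operands) {
            accumulator ^= operand.type;
            accumulator ^= static_cast<uint32_t>(operand.dimensions.size());
            for (uint32_t dimension : operand.dimensions) {
                accumulator ^= dimension;
                if (operand.isConstant) accumulator ^= 1;  // constants perturb the hash
            }
        }
        return accumulator;
    }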
@@ -240,7 +232,7 @@ std::vector<bool> DriverDevice::getSupportedOperations(const MetaModel& metaMode
return supportedOperations;
}
-std::pair<int, std::shared_ptr<PreparedModel>> DriverDevice::prepareModel(
+std::pair<int, std::shared_ptr<RuntimePreparedModel>> DriverDevice::prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
const std::optional<CacheToken>& maybeToken) const {
@@ -253,9 +245,9 @@ std::pair<int, std::shared_ptr<PreparedModel>> DriverDevice::prepareModel(
return {ANEURALNETWORKS_NO_ERROR, std::make_shared<DriverPreparedModel>(this, preparedModel)};
}
-std::pair<int, std::unique_ptr<Memory>> DriverDevice::allocate(const MemoryDescriptor& desc,
- hal::OperandType) const {
- const BufferDesc hidlDesc = {.dimensions = desc.dimensions};
+std::pair<int, std::unique_ptr<RuntimeMemory>> DriverDevice::allocate(const MemoryDescriptor& desc,
+ OperandType) const {
+ const V1_3::BufferDesc hidlDesc = {.dimensions = desc.dimensions};
std::vector<std::shared_ptr<VersionedIPreparedModel>> preparedModels(
desc.preparedModels.size());
std::transform(desc.preparedModels.begin(), desc.preparedModels.end(), preparedModels.begin(),
@@ -266,7 +258,7 @@ std::pair<int, std::unique_ptr<Memory>> DriverDevice::allocate(const MemoryDescr
});
auto [status, buffer, token] =
kInterface->allocate(hidlDesc, preparedModels, desc.inputRoles, desc.outputRoles);
- if (status != ErrorStatus::NONE) {
+ if (status != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "DriverDevice::allocate -- memory allocation on device " << getName()
<< " failed!";
return {convertErrorStatusToResultCode(status), nullptr};
@@ -279,7 +271,7 @@ std::pair<int, std::unique_ptr<Memory>> DriverDevice::allocate(const MemoryDescr
// input a bit.
static std::tuple<int, std::unique_ptr<MemoryAshmem>, std::vector<DataLocation>>
allocatePointerArgumentsToPool(const std::vector<ModelArgumentInfo>& args,
- std::vector<const Memory*>* memories) {
+ std::vector<const RuntimeMemory*>* memories) {
CHECK(memories != nullptr);
std::vector<DataLocation> ptrArgsLocations;
const uint32_t nextPoolIndex = memories->size();
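allocatePointerArgumentsToPool's contract, stripped to essentials: pointer-backed arguments are laid out back to back in one new pool, and each receives a DataLocation naming that pool's index and its slice. A simplified sketch with trimmed stand-in types:

    #include <cstdint>
    #include <vector>

    struct DemoLocation {
        uint32_t poolIndex;
        uint32_t offset;
        uint32_t length;
    };

    std::vector<DemoLocation> assignLocations(const std::vector<uint32_t>& argLengths,
                                              uint32_t nextPoolIndex) {
        std::vector<DemoLocation> locations;
        uint32_t offset = 0;
        for (uint32_t length : argLengths) {
            locations.push_back({nextPoolIndex, offset, length});
            offset += length;  // the pool is later allocated with size == offset
        }
        return locations;
    }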
@@ -321,14 +313,14 @@ allocatePointerArgumentsToPool(const std::vector<ModelArgumentInfo>& args,
// DeviceManager::mSyncExecHal.
std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute(
const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::execute");
// Make a copy of the memory tracker as we will append memory pools for pointer arguments.
- std::vector<const Memory*> localMemories = memories;
+ std::vector<const RuntimeMemory*> localMemories = memories;
// We separate the input & output pools so accelerators only need to copy
// the contents of the input pools. We could also use it to set protection
@@ -338,12 +330,12 @@ std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute(
const auto [n1, inputPtrArgsMemory, inputPtrArgsLocations] =
allocatePointerArgumentsToPool(inputs, &localMemories);
if (n1 != ANEURALNETWORKS_NO_ERROR) {
- return {n1, {}, kNoTiming};
+ return {n1, {}, {}};
}
const auto [n2, outputPtrArgsMemory, outputPtrArgsLocations] =
allocatePointerArgumentsToPool(outputs, &localMemories);
if (n2 != ANEURALNETWORKS_NO_ERROR) {
- return {n2, {}, kNoTiming};
+ return {n2, {}, {}};
}
// Copy the input data that was specified via a pointer.
@@ -364,7 +356,7 @@ std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute(
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
- request.pools[i] = localMemories[i]->getMemoryPool();
+ request.pools[i] = uncheckedConvert(localMemories[i]->getMemoryPool());
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
@@ -372,26 +364,30 @@ std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute(
int n = ANEURALNETWORKS_OP_FAILED;
std::vector<OutputShape> outputShapes;
- Timing timing = kNoTiming;
+ Timing timing;
// compute using burst if present
const bool burstCompute = (burstController != nullptr);
bool burstFallback = true;
if (burstCompute) {
- const bool compliant = compliantWithV1_2(request);
+ const bool compliant = compliantWithV1_2(convertToV1_3(request));
if (compliant) {
- V1_0::Request request12 = convertToV1_2(request);
+ V1_0::Request request12 = convertToV1_2(convertToV1_3(request));
std::vector<intptr_t> memoryIds;
memoryIds.reserve(localMemories.size());
- for (const Memory* memory : localMemories) {
+ for (const RuntimeMemory* memory : localMemories) {
memory->usedBy(burstController);
memoryIds.push_back(memory->getKey());
}
VLOG(EXECUTION) << "Before ExecutionBurstController->compute() "
<< SHOW_IF_DEBUG(toString(request12));
- std::tie(n, outputShapes, timing, burstFallback) =
- burstController->compute(request12, measure, memoryIds);
+ std::vector<V1_2::OutputShape> halOutputShapes;
+ V1_2::Timing halTiming;
+ std::tie(n, halOutputShapes, halTiming, burstFallback) =
+ burstController->compute(request12, convertToV1_2(measure), memoryIds);
+ outputShapes = uncheckedConvert(halOutputShapes);
+ timing = uncheckedConvert(halTiming);
}
}
@@ -426,19 +422,18 @@ std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute(
return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing};
}
-std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing>
-DriverPreparedModel::executeFenced(
+std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> DriverPreparedModel::executeFenced(
const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& waitFor,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const {
NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::executeFenced");
CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd > 0; }));
// Make a copy of the memory tracker as we will append memory pools for pointer arguments.
- std::vector<const Memory*> localMemories = memories;
- sp<hal::IFencedExecutionCallback> executeFencedCallback;
- hal::Timing timing = kNoTiming;
+ std::vector<const RuntimeMemory*> localMemories = memories;
+ sp<V1_3::IFencedExecutionCallback> executeFencedCallback;
+ Timing timing;
// We separate the input & output pools so accelerators only need to copy
// the contents of the input pools. We could also use it to set protection
@@ -474,14 +469,14 @@ DriverPreparedModel::executeFenced(
uint32_t count = localMemories.size();
request.pools.resize(count);
for (uint32_t i = 0; i < count; i++) {
- request.pools[i] = localMemories[i]->getMemoryPool();
+ request.pools[i] = uncheckedConvert(localMemories[i]->getMemoryPool());
}
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
"DriverPreparedModel::executeFenced");
int n = ANEURALNETWORKS_OP_FAILED;
- hidl_vec<hidl_handle> waitForHandles;
+ hardware::hidl_vec<hardware::hidl_handle> waitForHandles;
waitForHandles.resize(waitFor.size());
for (uint32_t i = 0; i < waitFor.size(); i++) {
native_handle_t* nativeHandle = native_handle_create(1, 0);
@@ -495,12 +490,12 @@ DriverPreparedModel::executeFenced(
return {n, -1, nullptr, timing};
}
nativeHandle->data[0] = dupFd;
- hidl_handle hidlHandle;
+ hardware::hidl_handle hidlHandle;
hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
waitForHandles[i] = std::move(hidlHandle);
}
- hidl_handle syncFence;
+ hardware::hidl_handle syncFence;
std::tie(n, syncFence, executeFencedCallback, timing) =
mPreparedModel->executeFenced(request, waitForHandles, measure, deadline,
loopTimeoutDuration, timeoutDurationAfterFence);
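Each wait-for fd above is duplicated and wrapped in a single-fd native_handle before crossing the HIDL boundary, so the callee owns its own copy of the fence. A sketch of just that wrapping step, with error handling made explicit:

    #include <cutils/native_handle.h>
    #include <unistd.h>

    native_handle_t* wrapFence(int fd) {
        native_handle_t* nativeHandle = native_handle_create(/*numFds=*/1, /*numInts=*/0);
        if (nativeHandle == nullptr) return nullptr;
        const int dupFd = dup(fd);  // caller keeps its fd; the handle owns dupFd
        if (dupFd < 0) {
            native_handle_delete(nativeHandle);  // frees the struct, closes nothing
            return nullptr;
        }
        nativeHandle->data[0] = dupFd;
        return nativeHandle;
    }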
@@ -561,25 +556,27 @@ class CpuDevice : public Device {
return kSupportedExtensions;
}
std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override;
- PerformanceInfo getPerformance(OperandType) const override { return kPerformance; }
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
+ Capabilities::PerformanceInfo getPerformance(OperandType) const override {
return kPerformance;
}
- PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
return kPerformance;
}
- PerformanceInfo getIfPerformance() const override { return kPerformance; }
- PerformanceInfo getWhilePerformance() const override { return kPerformance; }
+ Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
+ return kPerformance;
+ }
+ Capabilities::PerformanceInfo getIfPerformance() const override { return kPerformance; }
+ Capabilities::PerformanceInfo getWhilePerformance() const override { return kPerformance; }
bool isCachingSupported() const override { return false; }
int wait() const override { return ANEURALNETWORKS_NO_ERROR; }
- std::pair<int, std::shared_ptr<PreparedModel>> prepareModel(
+ std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
const std::optional<CacheToken>& maybeToken) const override;
- std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc,
- OperandType type) const override;
+ std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
+ OperandType type) const override;
private:
CpuDevice() = default;
@@ -588,17 +585,17 @@ class CpuDevice : public Device {
const std::string kVersionString = build::GetBuildNumber();
// Since the performance is a ratio compared to the CPU performance,
// by definition the performance of the CPU is 1.0.
- const PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f};
+ const Capabilities::PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f};
const std::vector<Extension> kSupportedExtensions{/* No extensions. */};
};
-// A special abstracted PreparedModel for the CPU, constructed by CpuDevice.
-class CpuPreparedModel : public PreparedModel {
+// A special abstracted RuntimePreparedModel for the CPU, constructed by CpuDevice.
+class CpuPreparedModel : public RuntimePreparedModel {
public:
// Factory method for CpuPreparedModel. Returns ANEURALNETWORKS_NO_ERROR and
// a prepared model object if successfully created. Returns an error code
// and nullptr otherwise.
- static std::pair<int, std::shared_ptr<PreparedModel>> create(Model hidlModel);
+ static std::pair<int, std::shared_ptr<RuntimePreparedModel>> create(Model model);
const Device* getDevice() const override { return CpuDevice::get().get(); }
std::shared_ptr<VersionedIPreparedModel> getInterface() const override { return nullptr; }
@@ -606,7 +603,7 @@ class CpuPreparedModel : public PreparedModel {
std::tuple<int, std::vector<OutputShape>, Timing> execute(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const override;
@@ -616,13 +613,13 @@ class CpuPreparedModel : public PreparedModel {
return nullptr;
}
- std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
+ std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& wait_for,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& wait_for,
MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
// Prefer to use CpuPreparedModel::create.
CpuPreparedModel(Model model, std::vector<RunTimePoolInfo> poolInfos)
@@ -634,21 +631,20 @@ class CpuPreparedModel : public PreparedModel {
};
std::vector<bool> CpuDevice::getSupportedOperations(const MetaModel& metaModel) const {
- const Model& hidlModel = metaModel.getModel();
- const size_t count = hidlModel.main.operations.size();
+ const Model& model = metaModel.getModel();
+ const size_t count = model.main.operations.size();
std::vector<bool> result(count, false);
for (size_t i = 0; i < count; i++) {
// TODO(b/119870033): Decide whether and how post-P operations would be supported on CPU.
// We may want to use the slicer for CpuDevice just as we do for
// DriverDevice.
- OperationType operationType = hidlModel.main.operations[i].type;
- result[i] = !isExtensionOperationType(operationType) &&
- operationType != OperationType::OEM_OPERATION;
+ OperationType operationType = model.main.operations[i].type;
+ result[i] = !isExtension(operationType) && operationType != OperationType::OEM_OPERATION;
}
return result;
}
-std::pair<int, std::shared_ptr<PreparedModel>> CpuDevice::prepareModel(
+std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuDevice::prepareModel(
const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& /*cacheDir*/,
const std::optional<CacheToken>& maybeToken) const {
@@ -656,8 +652,9 @@ std::pair<int, std::shared_ptr<PreparedModel>> CpuDevice::prepareModel(
<< "Should never call prepareModel with cache information on CpuDevice";
const Model model = makeModel();
- if (!validateModel(model, ValidationMode::RUNTIME) ||
- !validateExecutionPreference(preference) || !validatePriority(priority)) {
+ if (!validateModel(convertToV1_3(model), ValidationMode::RUNTIME) ||
+ !validateExecutionPreference(convertToV1_1(preference)) ||
+ !validatePriority(convertToV1_3(priority))) {
return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
if (hasDeadlinePassed(deadline)) {
@@ -667,8 +664,8 @@ std::pair<int, std::shared_ptr<PreparedModel>> CpuDevice::prepareModel(
return CpuPreparedModel::create(model);
}
-std::pair<int, std::unique_ptr<Memory>> CpuDevice::allocate(const MemoryDescriptor& desc,
- OperandType type) const {
+std::pair<int, std::unique_ptr<RuntimeMemory>> CpuDevice::allocate(const MemoryDescriptor& desc,
+ OperandType type) const {
uint32_t size = TypeManager::get()->getSizeOfData(type, desc.dimensions);
if (size == 0) {
LOG(ERROR) << "CpuDevice::allocate -- does not support unknown dimensions.";
@@ -677,14 +674,14 @@ std::pair<int, std::unique_ptr<Memory>> CpuDevice::allocate(const MemoryDescript
return MemoryAshmem::create(size);
}
-std::pair<int, std::shared_ptr<PreparedModel>> CpuPreparedModel::create(Model hidlModel) {
+std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuPreparedModel::create(Model model) {
std::vector<RunTimePoolInfo> poolInfos;
- if (!setRunTimePoolInfosFromHidlMemories(&poolInfos, hidlModel.pools)) {
+ if (!setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) {
return {ANEURALNETWORKS_UNMAPPABLE, nullptr};
}
- std::shared_ptr<PreparedModel> preparedModel =
- std::make_shared<CpuPreparedModel>(std::move(hidlModel), std::move(poolInfos));
+ std::shared_ptr<RuntimePreparedModel> preparedModel =
+ std::make_shared<CpuPreparedModel>(std::move(model), std::move(poolInfos));
return {ANEURALNETWORKS_NO_ERROR, std::move(preparedModel)};
}
@@ -696,26 +693,23 @@ static std::tuple<int, std::vector<OutputShape>, Timing> computeOnCpu(
const OptionalTimeoutDuration& loopTimeoutDuration) {
NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "computeOnCpu");
CpuExecutor executor;
- if (loopTimeoutDuration.getDiscriminator() !=
- OptionalTimeoutDuration::hidl_discriminator::none) {
- executor.setLoopTimeout(loopTimeoutDuration.nanoseconds());
+ if (loopTimeoutDuration.has_value()) {
+ executor.setLoopTimeout(loopTimeoutDuration->count());
}
if (deadline.has_value()) {
executor.setDeadline(*deadline);
}
int err = executor.run(model, request, modelPoolInfos, requestPoolInfos);
const auto& outputShapes = executor.getOutputShapes();
- return {err, outputShapes, kNoTiming};
+ return {err, outputShapes, {}};
}
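Annotation: the computeOnCpu change above is the canonical-types migration in miniature. The HIDL OptionalTimeoutDuration safe_union, probed via getDiscriminator(), becomes a type with std::optional semantics whose value behaves like std::chrono::nanoseconds (an assumption inferred from the has_value()/count() usage in the hunk). A self-contained sketch:

#include <chrono>
#include <cstdint>
#include <optional>

// Stand-ins for the runtime types; names are assumptions for illustration.
using Duration = std::chrono::nanoseconds;
using OptionalTimeoutDuration = std::optional<Duration>;

struct Executor {
    uint64_t loopTimeoutNanos = 0;
    void setLoopTimeout(uint64_t ns) { loopTimeoutNanos = ns; }
};

void applyLoopTimeout(Executor& executor, const OptionalTimeoutDuration& timeout) {
    // Before: timeout.getDiscriminator() != hidl_discriminator::none
    // After: plain std::optional semantics, as in the hunk above.
    if (timeout.has_value()) {
        executor.setLoopTimeout(timeout->count());
    }
}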
-std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing>
-CpuPreparedModel::executeFenced(const std::vector<ModelArgumentInfo>& inputs,
- const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
- const std::vector<int>& waitFor, hal::MeasureTiming measure,
- const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& duration) const {
+std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> CpuPreparedModel::executeFenced(
+ const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& duration) const {
VLOG(EXECUTION)
<< "CpuPreparedModel::executeFenced wait for sync fences to signal before execution";
for (int syncFd : waitFor) {
@@ -730,8 +724,8 @@ CpuPreparedModel::executeFenced(const std::vector<ModelArgumentInfo>& inputs,
// Update deadline if the timeout duration is closer than the deadline.
auto closestDeadline = deadline;
- if (duration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none) {
- const auto timeoutDurationDeadline = makeDeadline(duration.nanoseconds());
+ if (duration.has_value()) {
+ const auto timeoutDurationDeadline = makeDeadline(*duration);
if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) {
closestDeadline = timeoutDurationDeadline;
}
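Annotation: the hunk above tightens the execution deadline to whichever bound is sooner, the caller's deadline or one derived from timeoutDurationAfterFence. A sketch, assuming Deadline is a steady_clock time_point and modeling makeDeadline() as now() + duration:

#include <chrono>
#include <optional>

using Deadline = std::chrono::steady_clock::time_point;

// Keep whichever bound is sooner.
std::optional<Deadline> tighten(std::optional<Deadline> deadline,
                                std::optional<std::chrono::nanoseconds> duration) {
    if (duration.has_value()) {
        const Deadline fromDuration = std::chrono::time_point_cast<Deadline::duration>(
                std::chrono::steady_clock::now() + *duration);
        if (!deadline.has_value() || *deadline > fromDuration) {
            deadline = fromDuration;  // the duration-derived deadline is closer
        }
    }
    return deadline;
}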
@@ -751,21 +745,21 @@ CpuPreparedModel::executeFenced(const std::vector<ModelArgumentInfo>& inputs,
// Will choose between sync/async execution according to DeviceManager::mSyncExecCpu.
std::tuple<int, std::vector<OutputShape>, Timing> CpuPreparedModel::execute(
const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
+ const std::vector<const RuntimeMemory*>& memories,
const std::shared_ptr<ExecutionBurstController>& /*burstController*/,
MeasureTiming /*measure*/, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
if (hasDeadlinePassed(deadline)) {
- return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, kNoTiming};
+ return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, {}};
}
std::vector<RunTimePoolInfo> requestPoolInfos;
requestPoolInfos.reserve(memories.size());
- for (const Memory* mem : memories) {
+ for (const RuntimeMemory* mem : memories) {
if (std::optional<RunTimePoolInfo> poolInfo = mem->getRunTimePoolInfo()) {
requestPoolInfos.emplace_back(*poolInfo);
} else {
- return {ANEURALNETWORKS_UNMAPPABLE, {}, kNoTiming};
+ return {ANEURALNETWORKS_UNMAPPABLE, {}, {}};
}
}
// Create as many pools as there are input / output.
@@ -818,7 +812,7 @@ std::shared_ptr<Device> DeviceManager::getCpuDevice() {
std::shared_ptr<Device> DeviceManager::forTest_makeDriverDevice(const std::string& name,
const sp<V1_0::IDevice>& device) {
- const DeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
+ const HalDeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
const auto driverDevice = DriverDevice::create(name, makeDevice);
CHECK(driverDevice != nullptr);
return driverDevice;
@@ -831,7 +825,7 @@ void DeviceManager::findAvailableDevices() {
const auto names = hardware::getAllHalInstanceNames(V1_0::IDevice::descriptor);
for (const auto& name : names) {
VLOG(MANAGER) << "Found interface " << name;
- const DeviceFactory makeDevice = [name](bool blocking) {
+ const HalDeviceFactory makeDevice = [name](bool blocking) {
return blocking ? V1_0::IDevice::getService(name) : V1_0::IDevice::tryGetService(name);
};
registerDevice(name, makeDevice);
@@ -842,7 +836,7 @@ void DeviceManager::findAvailableDevices() {
mDevicesCpuOnly.push_back(CpuDevice::get());
}
-void DeviceManager::registerDevice(const std::string& name, const DeviceFactory& makeDevice) {
+void DeviceManager::registerDevice(const std::string& name, const HalDeviceFactory& makeDevice) {
if (auto device = DriverDevice::create(name, makeDevice)) {
mDevices.push_back(std::move(device));
}
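Annotation: the DeviceFactory-to-HalDeviceFactory rename above is purely a naming change; the factory remains a callable that produces a HAL device, optionally blocking until the service is up. A simplified stand-in (IDevice here is a placeholder for the real V1_0::IDevice):

#include <functional>
#include <memory>
#include <string>

struct IDevice {};  // stand-in for the real V1_0::IDevice

// A HalDeviceFactory is just a callable yielding a HAL device.
using HalDeviceFactory = std::function<std::shared_ptr<IDevice>(bool blocking)>;

HalDeviceFactory makeFactory(const std::string& name) {
    return [name](bool blocking) -> std::shared_ptr<IDevice> {
        // Real code: blocking ? IDevice::getService(name)
        //                     : IDevice::tryGetService(name);
        (void)blocking;
        (void)name;
        return std::make_shared<IDevice>();
    };
}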
diff --git a/nn/runtime/Manager.h b/nn/runtime/Manager.h
index d6d483576..3e3ce033f 100644
--- a/nn/runtime/Manager.h
+++ b/nn/runtime/Manager.h
@@ -43,40 +43,42 @@ class ModelArgumentInfo;
class VersionedIPreparedModel;
// A unified interface for actual driver prepared model as well as the CPU.
-class PreparedModel {
- DISALLOW_COPY_AND_ASSIGN(PreparedModel);
+class RuntimePreparedModel {
+ DISALLOW_COPY_AND_ASSIGN(RuntimePreparedModel);
public:
- PreparedModel() = default;
- virtual ~PreparedModel() = default;
+ RuntimePreparedModel() = default;
+ virtual ~RuntimePreparedModel() = default;
virtual const Device* getDevice() const = 0;
virtual std::shared_ptr<VersionedIPreparedModel> getInterface() const = 0;
// Perform computation with given input/output argument info and memory pools.
- virtual std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> execute(
+ virtual std::tuple<int, std::vector<OutputShape>, Timing> execute(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories,
- const std::shared_ptr<ExecutionBurstController>& burstController,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration) const = 0;
+ const std::vector<const RuntimeMemory*>& memories,
+ const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure,
+ const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration) const = 0;
// Perform fenced computation with given input/output argument info and memory pools.
// The returned timing information is only valid if the callback is nullptr.
// Returns error_code, sync_fence, callback and timing.
- virtual std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
+ virtual std::tuple<int, int, sp<V1_3::IFencedExecutionCallback>, Timing> executeFenced(
const std::vector<ModelArgumentInfo>& inputs,
const std::vector<ModelArgumentInfo>& outputs,
- const std::vector<const Memory*>& memories, const std::vector<int>& waitFor,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const = 0;
+ const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) const = 0;
virtual std::shared_ptr<ExecutionBurstController> configureExecutionBurst(
bool preferPowerOverLatency) const = 0;
};
+using ModelFactory = std::function<Model()>;
+
// A unified interface for actual driver devices as well as the CPU
class Device {
DISALLOW_COPY_AND_ASSIGN(Device);
@@ -90,29 +92,28 @@ class Device {
virtual const std::string& getVersionString() const = 0;
virtual int64_t getFeatureLevel() const = 0;
virtual int32_t getType() const = 0;
- virtual const std::vector<hal::Extension>& getSupportedExtensions() const = 0;
+ virtual const std::vector<Extension>& getSupportedExtensions() const = 0;
// See the MetaModel class in MetaModel.h for more details.
virtual std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const = 0;
- virtual hal::PerformanceInfo getPerformance(hal::OperandType type) const = 0;
- virtual hal::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const = 0;
- virtual hal::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const = 0;
- virtual hal::PerformanceInfo getIfPerformance() const = 0;
- virtual hal::PerformanceInfo getWhilePerformance() const = 0;
+ virtual Capabilities::PerformanceInfo getPerformance(OperandType type) const = 0;
+ virtual Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const = 0;
+ virtual Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const = 0;
+ virtual Capabilities::PerformanceInfo getIfPerformance() const = 0;
+ virtual Capabilities::PerformanceInfo getWhilePerformance() const = 0;
virtual bool isCachingSupported() const = 0;
virtual int wait() const = 0;
- virtual std::pair<int, std::shared_ptr<PreparedModel>> prepareModel(
- const hal::ModelFactory& makeModel, hal::ExecutionPreference preference,
- hal::Priority priority, const std::optional<Deadline>& deadline,
- const std::string& cacheDir,
- const std::optional<hal::CacheToken>& maybeToken) const = 0;
+ virtual std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
+ const ModelFactory& makeModel, ExecutionPreference preference, Priority priority,
+ const std::optional<Deadline>& deadline, const std::string& cacheDir,
+ const std::optional<CacheToken>& maybeToken) const = 0;
- // The caller is responsible for making sure the MemoryDescriptor only contains PreparedModels
- // from the same Device.
- virtual std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc,
- hal::OperandType type) const = 0;
+ // The caller is responsible for making sure the MemoryDescriptor only contains
+ // PreparedModels from the same Device.
+ virtual std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
+ OperandType type) const = 0;
};
// Manages the NN HAL devices. Only one instance of this class will exist.
@@ -168,8 +169,8 @@ class DeviceManager {
}
// Register a test device.
- void forTest_registerDevice(const std::string& name, const sp<hal::V1_0::IDevice>& device) {
- const hal::DeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
+ void forTest_registerDevice(const std::string& name, const sp<V1_0::IDevice>& device) {
+ const HalDeviceFactory makeDevice = [device](bool /*blocking*/) { return device; };
registerDevice(name, makeDevice);
}
@@ -182,7 +183,7 @@ class DeviceManager {
// Make a test device
static std::shared_ptr<Device> forTest_makeDriverDevice(const std::string& name,
- const sp<hal::V1_0::IDevice>& device);
+ const sp<V1_0::IDevice>& device);
bool forTest_isCpuDevice(const ANeuralNetworksDevice* device) const {
return reinterpret_cast<const Device*>(device) == getCpuDevice().get();
@@ -193,7 +194,7 @@ class DeviceManager {
DeviceManager();
// Adds a device for the manager to use.
- void registerDevice(const std::string& name, const hal::DeviceFactory& makeDevice);
+ void registerDevice(const std::string& name, const HalDeviceFactory& makeDevice);
void findAvailableDevices();
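Annotation: one detail worth calling out from the Device interface changes above: in the canonical types, PerformanceInfo is nested inside Capabilities, hence the Capabilities::PerformanceInfo spelling on every getter. A simplified stand-in (field names follow the kPerformance initializer earlier in the patch):

struct Capabilities {
    struct PerformanceInfo {
        float execTime;
        float powerUsage;
    };
    PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
    // ...per-operand-type entries elided...
};

// The CPU reports 1.0 for both fields: performance values are ratios
// relative to the CPU, so the CPU is the unit baseline by definition.
constexpr Capabilities::PerformanceInfo kCpuPerformance = {1.0f, 1.0f};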
diff --git a/nn/runtime/Memory.cpp b/nn/runtime/Memory.cpp
index ee9faf934..7efaf643b 100644
--- a/nn/runtime/Memory.cpp
+++ b/nn/runtime/Memory.cpp
@@ -30,6 +30,8 @@
#include <utility>
#include <vector>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
#include "CompilationBuilder.h"
#include "CpuExecutor.h"
#include "ExecutionBurstController.h"
@@ -41,7 +43,7 @@
namespace android {
namespace nn {
-using namespace hal;
+using ::android::hidl::memory::V1_0::IMemory;
namespace {
@@ -183,17 +185,18 @@ class DeviceMemoryValidator : public MemoryValidatorBase {
} // namespace
-Memory::Memory(hal::hidl_memory memory)
+RuntimeMemory::RuntimeMemory(hardware::hidl_memory memory)
: kHidlMemory(std::move(memory)),
mValidator(std::make_unique<SizedMemoryValidator>(kHidlMemory.size())) {}
-Memory::Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+RuntimeMemory::RuntimeMemory(hardware::hidl_memory memory,
+ std::unique_ptr<MemoryValidatorBase> validator)
: kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {}
-Memory::Memory(sp<hal::IBuffer> buffer, uint32_t token)
+RuntimeMemory::RuntimeMemory(sp<V1_3::IBuffer> buffer, uint32_t token)
: kBuffer(std::move(buffer)), kToken(token) {}
-Memory::~Memory() {
+RuntimeMemory::~RuntimeMemory() {
for (const auto& [ptr, weakBurst] : mUsedBy) {
if (const std::shared_ptr<ExecutionBurstController> burst = weakBurst.lock()) {
burst->freeMemory(getKey());
@@ -201,8 +204,8 @@ Memory::~Memory() {
}
}
-Request::MemoryPool Memory::getMemoryPool() const {
- Request::MemoryPool pool;
+V1_3::Request::MemoryPool RuntimeMemory::getMemoryPool() const {
+ V1_3::Request::MemoryPool pool;
if (kToken > 0) {
pool.token(kToken);
} else {
@@ -211,20 +214,20 @@ Request::MemoryPool Memory::getMemoryPool() const {
return pool;
}
-std::optional<RunTimePoolInfo> Memory::getRunTimePoolInfo() const {
+std::optional<RunTimePoolInfo> RuntimeMemory::getRunTimePoolInfo() const {
std::lock_guard<std::mutex> guard(mMutex);
if (!mHasCachedRunTimePoolInfo) {
- mCachedRunTimePoolInfo = RunTimePoolInfo::createFromHidlMemory(kHidlMemory);
+ mCachedRunTimePoolInfo = RunTimePoolInfo::createFromMemory(uncheckedConvert(kHidlMemory));
mHasCachedRunTimePoolInfo = true;
}
return mCachedRunTimePoolInfo;
}
-intptr_t Memory::getKey() const {
+intptr_t RuntimeMemory::getKey() const {
return reinterpret_cast<intptr_t>(this);
}
-void Memory::usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const {
+void RuntimeMemory::usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const {
std::lock_guard<std::mutex> guard(mMutex);
mUsedBy.emplace(burst.get(), burst);
}
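Annotation: getRunTimePoolInfo() above is a compute-once cache guarded by a mutex; the only migration change is that the cached value is now built from the converted canonical memory. A minimal sketch of the caching shape (stand-in types; in the real code the conversion can fail, leaving nullopt cached):

#include <mutex>
#include <optional>

struct RunTimePoolInfo {};

class Cache {
   public:
    std::optional<RunTimePoolInfo> get() const {
        std::lock_guard<std::mutex> guard(mMutex);
        if (!mHasCached) {
            mCached = RunTimePoolInfo{};  // real code maps the memory here
            mHasCached = true;
        }
        return mCached;
    }

   private:
    mutable std::mutex mMutex;
    mutable bool mHasCached = false;
    mutable std::optional<RunTimePoolInfo> mCached;
};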
@@ -246,37 +249,37 @@ static int copyHidlMemories(const std::optional<RunTimePoolInfo>& src,
return ANEURALNETWORKS_NO_ERROR;
}
-int copyIBufferToHidlMemory(const sp<IBuffer>& src, const hidl_memory& dst) {
+int copyIBufferToHidlMemory(const sp<V1_3::IBuffer>& src, const hardware::hidl_memory& dst) {
const auto ret = src->copyTo(dst);
if (!ret.isOk()) {
LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description();
return ANEURALNETWORKS_OP_FAILED;
}
- return convertErrorStatusToResultCode(static_cast<ErrorStatus>(ret));
+ return convertErrorStatusToResultCode(static_cast<V1_3::ErrorStatus>(ret));
}
-int copyHidlMemoryToIBuffer(const hidl_memory& src, const sp<IBuffer>& dst,
+int copyHidlMemoryToIBuffer(const hardware::hidl_memory& src, const sp<V1_3::IBuffer>& dst,
const std::vector<uint32_t>& dimensions) {
const auto ret = dst->copyFrom(src, dimensions);
if (!ret.isOk()) {
LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description();
return ANEURALNETWORKS_OP_FAILED;
}
- return convertErrorStatusToResultCode(static_cast<ErrorStatus>(ret));
+ return convertErrorStatusToResultCode(static_cast<V1_3::ErrorStatus>(ret));
}
-static int copyIBuffers(const sp<IBuffer>& src, const sp<IBuffer>& dst,
+static int copyIBuffers(const sp<V1_3::IBuffer>& src, const sp<V1_3::IBuffer>& dst,
const MemoryValidatorBase::Metadata& srcMetadata) {
const auto [n, memory] = MemoryRuntimeAHWB::create(srcMetadata.logicalSize);
NN_RETURN_IF_ERROR(n);
- const hidl_memory& hidlMemory = memory->getHidlMemory();
+ const hardware::hidl_memory& hidlMemory = memory->getHidlMemory();
if (!hidlMemory.valid()) return ANEURALNETWORKS_OUT_OF_MEMORY;
NN_RETURN_IF_ERROR(copyIBufferToHidlMemory(src, hidlMemory));
NN_RETURN_IF_ERROR(copyHidlMemoryToIBuffer(hidlMemory, dst, srcMetadata.dimensions));
return ANEURALNETWORKS_NO_ERROR;
}
-static int copyInternal(const Memory& src, const Memory& dst) {
+static int copyInternal(const RuntimeMemory& src, const RuntimeMemory& dst) {
if (&src == &dst) return ANEURALNETWORKS_NO_ERROR;
if (!src.getValidator().isInitialized()) {
@@ -307,7 +310,7 @@ static int copyInternal(const Memory& src, const Memory& dst) {
return ANEURALNETWORKS_OP_FAILED;
}
-int Memory::copy(const Memory& src, const Memory& dst) {
+int RuntimeMemory::copy(const RuntimeMemory& src, const RuntimeMemory& dst) {
int n = copyInternal(src, dst);
dst.getValidator().setInitialized(n == ANEURALNETWORKS_NO_ERROR);
return n;
@@ -333,7 +336,7 @@ int MemoryBuilder::addRole(const CompilationBuilder& compilation, IOType ioType,
return ANEURALNETWORKS_BAD_DATA;
}
- std::vector<std::tuple<const PreparedModel*, IOType, uint32_t>> roles;
+ std::vector<std::tuple<const RuntimePreparedModel*, IOType, uint32_t>> roles;
auto callback = [&roles](const auto* preparedModel, IOType type, uint32_t index) {
roles.emplace_back(preparedModel, type, index);
};
@@ -421,10 +424,10 @@ int MemoryBuilder::setDimensions(const std::vector<uint32_t>& dimensions) {
static void logMemoryDescriptorToInfo(const MemoryDescriptor& desc, const Operand& operand) {
LOG(INFO) << "MemoryDescriptor start";
- LOG(INFO) << " Data type: " << toString(operand.type);
- LOG(INFO) << " Scale: " << toString(operand.scale);
- LOG(INFO) << " Zero point: " << toString(operand.zeroPoint);
- LOG(INFO) << " Extra params: " << toString(operand.extraParams);
+ LOG(INFO) << " Data type: " << operand.type;
+ LOG(INFO) << " Scale: " << operand.scale;
+ LOG(INFO) << " Zero point: " << operand.zeroPoint;
+ LOG(INFO) << " Extra params: " << operand.extraParams;
LOG(INFO) << " Dimensions: " << toString(desc.dimensions);
LOG(INFO) << " Prepared models [" << desc.preparedModels.size() << "]:";
for (const auto* preparedModel : desc.preparedModels) {
@@ -432,11 +435,11 @@ static void logMemoryDescriptorToInfo(const MemoryDescriptor& desc, const Operan
}
LOG(INFO) << " Input roles [" << desc.inputRoles.size() << "]:";
for (const auto& usage : desc.inputRoles) {
- LOG(INFO) << " " << toString(usage);
+ LOG(INFO) << " " << usage;
}
LOG(INFO) << " Output roles [" << desc.outputRoles.size() << "]:";
for (const auto& usage : desc.outputRoles) {
- LOG(INFO) << " " << toString(usage);
+ LOG(INFO) << " " << usage;
}
LOG(INFO) << "MemoryDescriptor end";
}
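Annotation: the logging hunks here and elsewhere in the patch drop the HAL toString() helpers because the canonical types ship ostream operator<< overloads (via the newly included nnapi/TypeUtils.h). The idiom, in miniature:

#include <iostream>

enum class OperandType { FLOAT32, INT32 };

std::ostream& operator<<(std::ostream& os, OperandType type) {
    switch (type) {
        case OperandType::FLOAT32: return os << "FLOAT32";
        case OperandType::INT32: return os << "INT32";
    }
    return os << "UNKNOWN";
}

// Usage: LOG(INFO) << " Data type: " << operand.type; streams directly,
// with no toString() wrapper.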
@@ -484,14 +487,14 @@ int MemoryBuilder::finish() {
return ANEURALNETWORKS_NO_ERROR;
}
-std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const {
+std::pair<int, std::unique_ptr<RuntimeMemory>> MemoryBuilder::allocate() const {
if (!mFinished) {
LOG(ERROR) << "ANeuralNetworksMemory_createFromDesc -- passed an unfinished descriptor";
return {ANEURALNETWORKS_BAD_STATE, nullptr};
}
int n = ANEURALNETWORKS_OP_FAILED;
- std::unique_ptr<Memory> memory;
+ std::unique_ptr<RuntimeMemory> memory;
CHECK(mOperand.has_value());
// Try allocate the memory on device.
@@ -521,10 +524,10 @@ std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const {
}
std::pair<int, std::unique_ptr<MemoryAshmem>> MemoryAshmem::create(uint32_t size) {
- hidl_memory hidlMemory = allocateSharedMemory(size);
+ hardware::hidl_memory hidlMemory = allocateSharedMemory(size);
sp<IMemory> mapped = mapMemory(hidlMemory);
if (mapped == nullptr || mapped->getPointer() == nullptr) {
- LOG(ERROR) << "Memory::create failed";
+ LOG(ERROR) << "MemoryAshmem::create failed";
return {ANEURALNETWORKS_OUT_OF_MEMORY, nullptr};
}
return {ANEURALNETWORKS_NO_ERROR,
@@ -535,8 +538,8 @@ uint8_t* MemoryAshmem::getPointer() const {
return static_cast<uint8_t*>(static_cast<void*>(kMappedMemory->getPointer()));
}
-MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hidl_memory memory)
- : Memory(std::move(memory)), kMappedMemory(std::move(mapped)) {}
+MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hardware::hidl_memory memory)
+ : RuntimeMemory(std::move(memory)), kMappedMemory(std::move(mapped)) {}
std::pair<int, std::unique_ptr<MemoryFd>> MemoryFd::create(size_t size, int prot, int fd,
size_t offset) {
@@ -576,25 +579,26 @@ std::pair<int, std::unique_ptr<MemoryFd>> MemoryFd::create(size_t size, int prot
// Push the hidl_handle into a hidl_memory object. The hidl_memory object is
// responsible for cleaning the hidl_handle, the native handle, and the fd.
- hidl_memory hidlMemory = hidl_memory("mmap_fd", std::move(hidlHandle), size);
+ hardware::hidl_memory hidlMemory =
+ hardware::hidl_memory("mmap_fd", std::move(hidlHandle), size);
return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFd>(std::move(hidlMemory))};
}
-MemoryFd::MemoryFd(hidl_memory memory) : Memory(std::move(memory)) {}
+MemoryFd::MemoryFd(hardware::hidl_memory memory) : RuntimeMemory(std::move(memory)) {}
std::pair<int, std::unique_ptr<MemoryAHWB>> MemoryAHWB::create(const AHardwareBuffer& ahwb) {
AHardwareBuffer_Desc bufferDesc;
AHardwareBuffer_describe(&ahwb, &bufferDesc);
const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb);
- hidl_memory hidlMemory;
+ hardware::hidl_memory hidlMemory;
std::unique_ptr<MemoryValidatorBase> validator;
if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
- hidlMemory = hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
+ hidlMemory = hardware::hidl_memory("hardware_buffer_blob", handle, bufferDesc.width);
validator = std::make_unique<SizedMemoryValidator>(bufferDesc.width);
} else {
// memory size is not used.
- hidlMemory = hidl_memory("hardware_buffer", handle, 0);
+ hidlMemory = hardware::hidl_memory("hardware_buffer", handle, 0);
validator = std::make_unique<AHardwareBufferNonBlobValidator>();
}
auto memory = std::make_unique<MemoryAHWB>(std::move(hidlMemory), std::move(validator));
@@ -633,7 +637,8 @@ std::pair<int, std::unique_ptr<MemoryRuntimeAHWB>> MemoryRuntimeAHWB::create(uin
return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
- hidl_memory hidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
+ hardware::hidl_memory hidlMemory =
+ hardware::hidl_memory("hardware_buffer_blob", handle, desc.width);
auto memory = std::make_unique<MemoryRuntimeAHWB>(std::move(hidlMemory), ahwb,
static_cast<uint8_t*>(buffer));
allocateGuard.Disable();
@@ -641,9 +646,9 @@ std::pair<int, std::unique_ptr<MemoryRuntimeAHWB>> MemoryRuntimeAHWB::create(uin
return {ANEURALNETWORKS_NO_ERROR, std::move(memory)};
}
-MemoryRuntimeAHWB::MemoryRuntimeAHWB(hal::hidl_memory memory, AHardwareBuffer* ahwb,
+MemoryRuntimeAHWB::MemoryRuntimeAHWB(hardware::hidl_memory memory, AHardwareBuffer* ahwb,
uint8_t* buffer)
- : Memory(std::move(memory)), mAhwb(ahwb), mBuffer(buffer) {
+ : RuntimeMemory(std::move(memory)), mAhwb(ahwb), mBuffer(buffer) {
CHECK(mAhwb != nullptr);
CHECK(mBuffer != nullptr);
}
@@ -653,7 +658,7 @@ MemoryRuntimeAHWB::~MemoryRuntimeAHWB() {
AHardwareBuffer_release(mAhwb);
}
-std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<hal::IBuffer> buffer,
+std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<V1_3::IBuffer> buffer,
uint32_t token) {
if (buffer == nullptr) {
LOG(ERROR) << "nullptr IBuffer for device memory.";
@@ -666,8 +671,8 @@ std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<ha
return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)};
};
-MemoryFromDevice::MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token)
- : Memory(std::move(buffer), token) {}
+MemoryFromDevice::MemoryFromDevice(sp<V1_3::IBuffer> buffer, uint32_t token)
+ : RuntimeMemory(std::move(buffer), token) {}
} // namespace nn
} // namespace android
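Annotation: Memory.cpp consistently uses a factory convention that this patch preserves across all the renamed classes: create() returns a {status, owning pointer} pair, with a null pointer on failure. A stripped-down sketch of the convention (constants are stand-ins for the real ANEURALNETWORKS_* codes):

#include <cstdint>
#include <memory>
#include <utility>

constexpr int kNoError = 0;      // stand-in for ANEURALNETWORKS_NO_ERROR
constexpr int kOutOfMemory = 5;  // stand-in for ANEURALNETWORKS_OUT_OF_MEMORY

struct Ashmem { /* mapped shared-memory region */ };

std::pair<int, std::unique_ptr<Ashmem>> createAshmem(uint32_t size) {
    if (size == 0) return {kOutOfMemory, nullptr};
    return {kNoError, std::make_unique<Ashmem>()};
}

// Callers unpack both halves with a structured binding:
//   auto [n, memory] = createAshmem(4096);
//   if (n != kNoError) { /* propagate n */ }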
diff --git a/nn/runtime/Memory.h b/nn/runtime/Memory.h
index 56bf81dcd..f78ef8088 100644
--- a/nn/runtime/Memory.h
+++ b/nn/runtime/Memory.h
@@ -39,11 +39,13 @@
namespace android {
namespace nn {
+using ::android::hidl::memory::V1_0::IMemory;
+
class CompilationBuilder;
class Device;
class ExecutionBurstController;
class ModelBuilder;
-class PreparedModel;
+class RuntimePreparedModel;
// A utility template class to accumulate multiple objects and assign each
// a distinct index number, starting with 0.
@@ -93,12 +95,12 @@ class ObjectTracker {
};
using CompilationRole = std::tuple<const CompilationBuilder*, IOType, uint32_t>;
-using StepRoleCallback = std::function<void(const PreparedModel*, IOType, uint32_t)>;
+using StepRoleCallback = std::function<void(const RuntimePreparedModel*, IOType, uint32_t)>;
struct MemoryDescriptor {
std::vector<uint32_t> dimensions;
- ObjectTracker<PreparedModel> preparedModels;
- std::vector<hal::BufferRole> inputRoles, outputRoles;
+ ObjectTracker<RuntimePreparedModel> preparedModels;
+ std::vector<BufferRole> inputRoles, outputRoles;
};
class MemoryValidatorBase {
@@ -144,7 +146,7 @@ class MemoryValidatorBase {
// The data type, scale, zero point, and extra parameters of the target operand.
// Other fields will be ignored, including dimensions, lifetime, location, etc.
// Set to std::nullopt if undefined.
- std::optional<hal::Operand> operand;
+ std::optional<Operand> operand;
};
virtual Metadata getMetadata() const = 0;
@@ -158,24 +160,24 @@ class MemoryValidatorBase {
virtual bool isInitialized() const { return true; }
};
-int copyIBufferToHidlMemory(const sp<hal::IBuffer>& src, const hal::hidl_memory& dst);
+int copyIBufferToHidlMemory(const sp<V1_3::IBuffer>& src, const hardware::hidl_memory& dst);
-int copyHidlMemoryToIBuffer(const hal::hidl_memory& src, const sp<hal::IBuffer>& dst,
+int copyHidlMemoryToIBuffer(const hardware::hidl_memory& src, const sp<V1_3::IBuffer>& dst,
const std::vector<uint32_t>& dimensions);
// Represents a memory region.
-class Memory {
+class RuntimeMemory {
// Disallow copy and assign to prevent slicing
- DISALLOW_COPY_AND_ASSIGN(Memory);
+ DISALLOW_COPY_AND_ASSIGN(RuntimeMemory);
public:
// Custom destructor to notify any ExecutionBurstControllers currently using
// this memory that it is being freed.
- virtual ~Memory();
+ virtual ~RuntimeMemory();
- hal::Request::MemoryPool getMemoryPool() const;
- const hal::hidl_memory& getHidlMemory() const { return kHidlMemory; }
- const sp<hal::IBuffer>& getIBuffer() const { return kBuffer; }
+ V1_3::Request::MemoryPool getMemoryPool() const;
+ const hardware::hidl_memory& getHidlMemory() const { return kHidlMemory; }
+ const sp<V1_3::IBuffer>& getIBuffer() const { return kBuffer; }
virtual uint32_t getSize() const { return getHidlMemory().size(); }
virtual std::optional<RunTimePoolInfo> getRunTimePoolInfo() const;
@@ -196,24 +198,24 @@ class Memory {
// the bursts' memory cache.
void usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const;
- static int copy(const Memory& src, const Memory& dst);
+ static int copy(const RuntimeMemory& src, const RuntimeMemory& dst);
protected:
- Memory(hal::hidl_memory memory);
- Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
- Memory(sp<hal::IBuffer> buffer, uint32_t token);
+ RuntimeMemory(hardware::hidl_memory memory);
+ RuntimeMemory(hardware::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator);
+ RuntimeMemory(sp<V1_3::IBuffer> buffer, uint32_t token);
// The HIDL representation for this memory. We will use one of the following values
// when communicating with the drivers.
- const hal::hidl_memory kHidlMemory;
- const sp<hal::IBuffer> kBuffer;
+ const hardware::hidl_memory kHidlMemory;
+ const sp<V1_3::IBuffer> kBuffer;
const uint32_t kToken = 0;
std::unique_ptr<MemoryValidatorBase> mValidator;
private:
mutable std::mutex mMutex;
- // mUsedBy is essentially a set of burst objects which use this Memory
+ // mUsedBy is essentially a set of burst objects which use this RuntimeMemory
// object. However, std::weak_ptr does not have comparison operations nor a
// std::hash implementation. This is because it is either a valid pointer
// (non-null) if the shared object is still alive, or it is null if the
@@ -238,7 +240,7 @@ class MemoryBuilder {
int finish();
- std::pair<int, std::unique_ptr<Memory>> allocate() const;
+ std::pair<int, std::unique_ptr<RuntimeMemory>> allocate() const;
private:
bool badState(const char* name) const;
@@ -253,7 +255,7 @@ class MemoryBuilder {
// Keep track of the data type, scale, zero point, and extra parameters of the target operand.
// Other fields will be ignored, including dimensions, lifetime, location, etc.
// It is std::nullopt if no usage has been specified yet.
- std::optional<hal::Operand> mOperand;
+ std::optional<Operand> mOperand;
// Once the descriptor has been finished, we should not allow further modifications.
bool mFinished = false;
@@ -271,7 +273,7 @@ class MemoryBuilder {
bool mShouldFallback = true;
};
-class MemoryAshmem : public Memory {
+class MemoryAshmem : public RuntimeMemory {
public:
// Creates a memory object containing a new android shared memory ("ashmem")
// object of the size specified in bytes. Because this ashmem region can be
@@ -292,13 +294,13 @@ class MemoryAshmem : public Memory {
}
// prefer using MemoryAshmem::create
- MemoryAshmem(sp<hal::IMemory> mapped, hal::hidl_memory memory);
+ MemoryAshmem(sp<IMemory> mapped, hardware::hidl_memory memory);
private:
- const sp<hal::IMemory> kMappedMemory;
+ const sp<IMemory> kMappedMemory;
};
-class MemoryFd : public Memory {
+class MemoryFd : public RuntimeMemory {
public:
// Create a memory object based on input size, prot, and fd that can be sent
// across HIDL. This function duplicates the provided fd, and owns the
@@ -310,10 +312,10 @@ class MemoryFd : public Memory {
size_t offset);
// prefer using MemoryFd::create
- MemoryFd(hal::hidl_memory memory);
+ MemoryFd(hardware::hidl_memory memory);
};
-class MemoryAHWB : public Memory {
+class MemoryAHWB : public RuntimeMemory {
public:
// Create a memory object to keep track of (but not take ownership of) the
// provided AHardwareBuffer handle.
@@ -323,11 +325,11 @@ class MemoryAHWB : public Memory {
static std::pair<int, std::unique_ptr<MemoryAHWB>> create(const AHardwareBuffer& ahwb);
// prefer using MemoryAHWB::create
- MemoryAHWB(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
- : Memory(std::move(memory), std::move(validator)) {}
+ MemoryAHWB(hardware::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator)
+ : RuntimeMemory(std::move(memory), std::move(validator)) {}
};
-class MemoryRuntimeAHWB : public Memory {
+class MemoryRuntimeAHWB : public RuntimeMemory {
public:
// Create a memory object containing a new BLOB-mode AHardwareBuffer memory
// object of the size specified in bytes. The created memory is managed and
@@ -347,7 +349,7 @@ class MemoryRuntimeAHWB : public Memory {
}
// prefer using MemoryRuntimeAHWB::create
- MemoryRuntimeAHWB(hal::hidl_memory memory, AHardwareBuffer* ahwb, uint8_t* buffer);
+ MemoryRuntimeAHWB(hardware::hidl_memory memory, AHardwareBuffer* ahwb, uint8_t* buffer);
~MemoryRuntimeAHWB();
private:
@@ -355,21 +357,21 @@ class MemoryRuntimeAHWB : public Memory {
uint8_t* const mBuffer;
};
-class MemoryFromDevice : public Memory {
+class MemoryFromDevice : public RuntimeMemory {
public:
// Create a memory object to keep track of a driver-allocated device memory.
// The memory is recognized by the driver via a token.
//
// On success, returns ANEURALNETWORKS_NO_ERROR and a memory object.
// On error, returns the appropriate NNAPI error code and nullptr.
- static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<hal::IBuffer> buffer,
+ static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<V1_3::IBuffer> buffer,
uint32_t token);
// prefer using MemoryFromDevice::create
- MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token);
+ MemoryFromDevice(sp<V1_3::IBuffer> buffer, uint32_t token);
};
-using MemoryTracker = ObjectTracker<Memory>;
+using MemoryTracker = ObjectTracker<RuntimeMemory>;
} // namespace nn
} // namespace android
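Annotation: Memory.h's ObjectTracker (used above via MemoryTracker and the descriptor's preparedModels) accumulates objects and hands each distinct one an index starting at 0. A simplified stand-in of that idea, not the real template:

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

template <typename T>
class ObjectTracker {
   public:
    // Returns the existing index for a known object, or assigns the next one.
    uint32_t add(const T* object) {
        auto [it, inserted] = mIndex.emplace(object, static_cast<uint32_t>(mObjects.size()));
        if (inserted) mObjects.push_back(object);
        return it->second;
    }
    std::size_t size() const { return mObjects.size(); }
    const T* operator[](std::size_t i) const { return mObjects[i]; }

   private:
    std::unordered_map<const T*, uint32_t> mIndex;
    std::vector<const T*> mObjects;
};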
diff --git a/nn/runtime/ModelArgumentInfo.cpp b/nn/runtime/ModelArgumentInfo.cpp
index cf2400475..a6a8908b2 100644
--- a/nn/runtime/ModelArgumentInfo.cpp
+++ b/nn/runtime/ModelArgumentInfo.cpp
@@ -30,8 +30,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
static const std::pair<int, ModelArgumentInfo> kBadDataModelArgumentInfo{ANEURALNETWORKS_BAD_DATA,
{}};
@@ -98,33 +96,33 @@ int ModelArgumentInfo::updateDimensionInfo(const Operand& operand,
mDimensions = operand.dimensions;
} else {
const uint32_t count = newType->dimensionCount;
- mDimensions = hidl_vec<uint32_t>(count);
+ mDimensions = std::vector<uint32_t>(count);
std::copy(&newType->dimensions[0], &newType->dimensions[count], mDimensions.begin());
}
return ANEURALNETWORKS_NO_ERROR;
}
-hidl_vec<RequestArgument> createRequestArguments(
+std::vector<Request::Argument> createRequestArguments(
const std::vector<ModelArgumentInfo>& argumentInfos,
const std::vector<DataLocation>& ptrArgsLocations) {
const size_t count = argumentInfos.size();
- hidl_vec<RequestArgument> ioInfos(count);
+ std::vector<Request::Argument> ioInfos(count);
uint32_t ptrArgsIndex = 0;
for (size_t i = 0; i < count; i++) {
const auto& info = argumentInfos[i];
switch (info.state()) {
case ModelArgumentInfo::POINTER:
- ioInfos[i] = {.hasNoValue = false,
+ ioInfos[i] = {.lifetime = Request::Argument::LifeTime::POOL,
.location = ptrArgsLocations[ptrArgsIndex++],
.dimensions = info.dimensions()};
break;
case ModelArgumentInfo::MEMORY:
- ioInfos[i] = {.hasNoValue = false,
+ ioInfos[i] = {.lifetime = Request::Argument::LifeTime::POOL,
.location = info.locationAndLength(),
.dimensions = info.dimensions()};
break;
case ModelArgumentInfo::HAS_NO_VALUE:
- ioInfos[i] = {.hasNoValue = true};
+ ioInfos[i] = {.lifetime = Request::Argument::LifeTime::NO_VALUE};
break;
default:
CHECK(false);
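Annotation: createRequestArguments above replaces the HIDL hasNoValue flag with an explicit lifetime on the canonical Request::Argument: POOL for pointer- and memory-backed arguments, NO_VALUE for omitted optionals. Simplified stand-ins for illustration:

#include <cstdint>
#include <vector>

struct DataLocation { uint32_t poolIndex = 0, offset = 0, length = 0; };
struct Argument {
    enum class LifeTime { POOL, NO_VALUE };
    LifeTime lifetime;
    DataLocation location;
    std::vector<uint32_t> dimensions;
};

// The old HIDL RequestArgument carried hasNoValue = true/false; the canonical
// Argument makes the same distinction with an explicit LifeTime.
Argument makeNoValueArgument() {
    return {Argument::LifeTime::NO_VALUE, {}, {}};
}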
diff --git a/nn/runtime/ModelArgumentInfo.h b/nn/runtime/ModelArgumentInfo.h
index 22dd34cb0..d0e2bb038 100644
--- a/nn/runtime/ModelArgumentInfo.h
+++ b/nn/runtime/ModelArgumentInfo.h
@@ -20,7 +20,6 @@
#include <utility>
#include <vector>
-#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "Utils.h"
@@ -38,10 +37,10 @@ class ModelArgumentInfo {
ModelArgumentInfo() {}
static std::pair<int, ModelArgumentInfo> createFromPointer(
- const hal::Operand& operand, const ANeuralNetworksOperandType* type,
+ const Operand& operand, const ANeuralNetworksOperandType* type,
void* data /* nullptr means HAS_NO_VALUE */, uint32_t length);
static std::pair<int, ModelArgumentInfo> createFromMemory(
- const hal::Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex,
+ const Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex,
uint32_t offset, uint32_t length);
enum State { POINTER, MEMORY, HAS_NO_VALUE, UNSPECIFIED };
@@ -78,17 +77,17 @@ class ModelArgumentInfo {
return mLocationAndLength.length;
}
- const hal::DataLocation& locationAndLength() const {
+ const DataLocation& locationAndLength() const {
CHECK_EQ(mState, MEMORY);
return mLocationAndLength;
}
- hal::DataLocation& locationAndLength() {
+ DataLocation& locationAndLength() {
CHECK_EQ(mState, MEMORY);
return mLocationAndLength;
}
private:
- int updateDimensionInfo(const hal::Operand& operand, const ANeuralNetworksOperandType* newType);
+ int updateDimensionInfo(const Operand& operand, const ANeuralNetworksOperandType* newType);
// Whether the argument was specified as being in a Memory, as a pointer,
// has no value, or has not been specified.
@@ -101,16 +100,16 @@ class ModelArgumentInfo {
// mDimensions is valid.
State mState = UNSPECIFIED; // fixed at creation
void* mBuffer = nullptr; // fixed at creation
- hal::DataLocation mLocationAndLength; // can be updated after creation
+ DataLocation mLocationAndLength; // can be updated after creation
std::vector<uint32_t> mDimensions; // can be updated after creation
bool mIsSufficient = true; // can be updated after creation
};
-// Convert ModelArgumentInfo to HIDL RequestArgument. For pointer arguments, use the location
+// Convert ModelArgumentInfo to Request::Argument. For pointer arguments, use the location
// information in ptrArgsLocations.
-hal::hidl_vec<hal::RequestArgument> createRequestArguments(
+std::vector<Request::Argument> createRequestArguments(
const std::vector<ModelArgumentInfo>& argumentInfos,
- const std::vector<hal::DataLocation>& ptrArgsLocations);
+ const std::vector<DataLocation>& ptrArgsLocations);
} // namespace nn
} // namespace android
diff --git a/nn/runtime/ModelBuilder.cpp b/nn/runtime/ModelBuilder.cpp
index ab63f62d1..0c506d5f3 100644
--- a/nn/runtime/ModelBuilder.cpp
+++ b/nn/runtime/ModelBuilder.cpp
@@ -35,8 +35,6 @@
namespace android {
namespace nn {
-using namespace hal;
-
// The maximum number of operands and operations that a model may have.
const uint32_t MAX_NUMBER_OF_OPERANDS = 0xFFFFFFFE;
const uint32_t MAX_NUMBER_OF_OPERATIONS = 0xFFFFFFFE;
@@ -66,7 +64,7 @@ int ModelBuilder::addOperand(const ANeuralNetworksOperandType& type) {
}
OperandType operandType = static_cast<OperandType>(type.type);
- if (isExtensionOperandType(operandType) && !TypeManager::get()->areExtensionsAllowed()) {
+ if (isExtension(operandType) && !TypeManager::get()->areExtensionsAllowed()) {
LOG(ERROR) << "Extensions are not supported for this process.";
return ANEURALNETWORKS_BAD_DATA;
}
@@ -77,9 +75,9 @@ int ModelBuilder::addOperand(const ANeuralNetworksOperandType& type) {
}
const Extension::OperandTypeInformation* info = nullptr;
- if (isExtensionOperandType(operandType) &&
+ if (isExtension(operandType) &&
!TypeManager::get()->getExtensionOperandTypeInfo(operandType, &info)) {
- LOG(ERROR) << "Extension operand type " << toString(operandType) << " is not registered";
+ LOG(ERROR) << "Extension operand type " << operandType << " is not registered";
return ANEURALNETWORKS_BAD_DATA;
}
NN_RETURN_IF_ERROR(validateOperandType(type, info, "ANeuralNetworksModel_addOperand", true));
@@ -92,13 +90,12 @@ int ModelBuilder::addOperand(const ANeuralNetworksOperandType& type) {
mOperands.push_back({
.type = operandType,
.dimensions =
- hidl_vec<uint32_t>(type.dimensions, type.dimensions + type.dimensionCount),
- .numberOfConsumers = 0,
+ std::vector<uint32_t>(type.dimensions, type.dimensions + type.dimensionCount),
.scale = type.scale,
.zeroPoint = type.zeroPoint,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .lifetime = Operand::LifeTime::TEMPORARY_VARIABLE,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
- .extraParams = OperandExtraParams(),
+ .extraParams = {},
});
mHasOEMOperand |= isOemOperand;
return ANEURALNETWORKS_NO_ERROR;
@@ -122,7 +119,7 @@ int ModelBuilder::setOperandValue(uint32_t index, const void* buffer, size_t len
"not 0";
return ANEURALNETWORKS_BAD_DATA;
}
- operand.lifetime = OperandLifeTime::NO_VALUE;
+ operand.lifetime = Operand::LifeTime::NO_VALUE;
// The location is unused and is set to zeros.
operand.location = {.poolIndex = 0, .offset = 0, .length = 0};
} else {
@@ -150,14 +147,14 @@ int ModelBuilder::setOperandValue(uint32_t index, const void* buffer, size_t len
uint32_t existingSize = static_cast<uint32_t>(mSmallOperandValues.size());
uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength);
mSmallOperandValues.resize(existingSize + extraBytes + valueLength);
- operand.lifetime = OperandLifeTime::CONSTANT_COPY;
+ operand.lifetime = Operand::LifeTime::CONSTANT_COPY;
operand.location = {
.poolIndex = 0, .offset = existingSize + extraBytes, .length = valueLength};
memcpy(&mSmallOperandValues[operand.location.offset], buffer, valueLength);
VLOG(MODEL) << "Copied small value to offset " << operand.location.offset;
} else {
VLOG(MODEL) << "Saving large value";
- operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
+ operand.lifetime = Operand::LifeTime::CONSTANT_REFERENCE;
// The values for poolIndex and offset will be set when the model is finished.
typedef decltype(operand.location.poolIndex) PoolIndexType;
typedef decltype(operand.location.offset) OffsetType;
@@ -191,7 +188,7 @@ int ModelBuilder::setOperandValueFromModel(uint32_t index, const ModelBuilder* v
return ANEURALNETWORKS_BAD_DATA;
}
Operand& operand = mOperands[index];
- operand.lifetime = OperandLifeTime::SUBGRAPH;
+ operand.lifetime = Operand::LifeTime::SUBGRAPH;
operand.location = {
.poolIndex = 0,
.offset = static_cast<uint32_t>(mReferencedModels.size()),
@@ -216,17 +213,17 @@ int ModelBuilder::setOperandSymmPerChannelQuantParams(
Operand& operand = mOperands[index];
if (!validateOperandSymmPerChannelQuantParams(
- operand, channelQuant,
+ convertToV1_3(operand), channelQuant,
"ANeuralNetworksModel_setOperandSymmPerChannelQuantParams")) {
return ANEURALNETWORKS_BAD_DATA;
}
switch (operand.type) {
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
- operand.extraParams.channelQuant({
- .scales = hidl_vec<float>(channelQuant.scales,
- channelQuant.scales + channelQuant.scaleCount),
+ operand.extraParams = Operand::SymmPerChannelQuantParams{
+ .scales = std::vector<float>(channelQuant.scales,
+ channelQuant.scales + channelQuant.scaleCount),
.channelDim = channelQuant.channelDim,
- });
+ };
break;
default:
LOG(ERROR) << "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams "
@@ -258,7 +255,7 @@ int ModelBuilder::setOperandExtensionData(uint32_t index, const void* data, size
<< "is zero";
return ANEURALNETWORKS_BAD_DATA;
}
- if (!isExtensionOperandType(operand.type)) {
+ if (!isExtension(operand.type)) {
LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData "
<< "setting extension data for a base operand type "
<< static_cast<int32_t>(operand.type);
@@ -266,11 +263,11 @@ int ModelBuilder::setOperandExtensionData(uint32_t index, const void* data, size
}
if (data == nullptr) {
- operand.extraParams.none();
+ operand.extraParams = {};
} else {
- operand.extraParams.extension(
- hidl_vec<uint8_t>(reinterpret_cast<const uint8_t*>(data),
- reinterpret_cast<const uint8_t*>(data) + length));
+ operand.extraParams = Operand::ExtensionParams(
+ std::vector<uint8_t>(reinterpret_cast<const uint8_t*>(data),
+ reinterpret_cast<const uint8_t*>(data) + length));
}
return ANEURALNETWORKS_NO_ERROR;
}
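Annotation: the extraParams hunks above trade the HIDL safe_union setter methods (channelQuant(), extension(), none()) for plain assignment of alternatives. Modeled here with std::variant; whether the canonical ExtraParams is literally a std::variant is an assumption, but the assignment-style usage above matches:

#include <cstdint>
#include <variant>
#include <vector>

struct SymmPerChannelQuantParams {
    std::vector<float> scales;
    uint32_t channelDim;
};
using ExtensionParams = std::vector<uint8_t>;
using ExtraParams = std::variant<std::monostate, SymmPerChannelQuantParams, ExtensionParams>;

void example(ExtraParams& extraParams) {
    // Old HIDL safe_union setter: extraParams.channelQuant({...});
    extraParams = SymmPerChannelQuantParams{{0.5f, 0.25f}, /*channelDim=*/0};
    // Old clearing call: extraParams.none(); now just reset to the default.
    extraParams = {};
}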
@@ -283,7 +280,7 @@ int ModelBuilder::copyLargeValuesToSharedMemory() {
size_t poolSize = 0;
for (LargeValue& l : mLargeOperandValues) {
Operand& operand = mOperands[l.operandIndex];
- nnAssert(operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE);
+ CHECK_EQ(operand.lifetime, Operand::LifeTime::CONSTANT_REFERENCE);
poolSize += alignBytesNeeded(poolSize, operand.location.length);
operand.location.offset = poolSize;
poolSize += operand.location.length;
@@ -308,8 +305,8 @@ int ModelBuilder::copyLargeValuesToSharedMemory() {
return ANEURALNETWORKS_NO_ERROR;
}
-int ModelBuilder::setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
- size_t length) {
+int ModelBuilder::setOperandValueFromMemory(uint32_t index, const RuntimeMemory* memory,
+ uint32_t offset, size_t length) {
VLOG(MODEL) << __func__ << " for operand " << index << " offset " << offset << " size "
<< length;
if (badState("setOperandValueFromMemory")) {
@@ -339,7 +336,7 @@ int ModelBuilder::setOperandValueFromMemory(uint32_t index, const Memory* memory
nullptr, offset, length)) {
return ANEURALNETWORKS_BAD_DATA;
}
- operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
+ operand.lifetime = Operand::LifeTime::CONSTANT_REFERENCE;
operand.location = {.poolIndex = mMemories.add(memory),
.offset = offset,
.length = static_cast<uint32_t>(length)};
@@ -354,7 +351,7 @@ int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t input
}
OperationType operationType = static_cast<OperationType>(type);
- if (isExtensionOperationType(operationType) && !TypeManager::get()->areExtensionsAllowed()) {
+ if (isExtension(operationType) && !TypeManager::get()->areExtensionsAllowed()) {
LOG(ERROR) << "Extensions are not supported for this process.";
return ANEURALNETWORKS_BAD_DATA;
}
@@ -362,7 +359,7 @@ int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t input
LOG(WARNING) << "OEM_OPERATION is deprecated. Use Extensions instead.";
}
- if (!isExtensionOperationType(operationType)) {
+ if (!isExtension(operationType)) {
if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM, type)) {
LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operation type " << type;
return ANEURALNETWORKS_BAD_DATA;
@@ -370,8 +367,8 @@ int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t input
}
auto isValidSubgraphReference = [this](const Operand& modelOperand) -> bool {
- NN_RET_CHECK(modelOperand.type == OperandType::SUBGRAPH)
- << "Unexpected operand type: " << toString(modelOperand.type);
+ NN_RET_CHECK_EQ(modelOperand.type, OperandType::SUBGRAPH)
+ << "Unexpected operand type: " << modelOperand.type;
NN_RET_CHECK_LT(modelOperand.location.offset, referencedModelCount())
<< "Invalid subgraph model reference";
return true;
@@ -405,14 +402,11 @@ int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t input
mOperations.push_back({
.type = operationType,
- .inputs = hidl_vec<uint32_t>(inputs, inputs + inputCount),
- .outputs = hidl_vec<uint32_t>(outputs, outputs + outputCount),
+ .inputs = std::vector<uint32_t>(inputs, inputs + inputCount),
+ .outputs = std::vector<uint32_t>(outputs, outputs + outputCount),
});
- for (uint32_t i : mOperations.back().inputs) {
- mOperands[i].numberOfConsumers++;
- }
mHasOEMOperation |= (operationType == OperationType::OEM_OPERATION);
- mHasExtensionOperation |= isExtensionOperationType(operationType);
+ mHasExtensionOperation |= isExtension(operationType);
return ANEURALNETWORKS_NO_ERROR;
}
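Annotation: several hunks in ModelBuilder.cpp swap hidl_vec range construction for std::vector's iterator-pair constructor; the semantics are the same copy of [data, data + count):

#include <cstdint>
#include <vector>

std::vector<uint32_t> copyIndexes(const uint32_t* data, uint32_t count) {
    // Equivalent to the old hidl_vec<uint32_t>(data, data + count).
    return std::vector<uint32_t>(data, data + count);
}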
@@ -437,7 +431,7 @@ int ModelBuilder::identifyInputsAndOutputs(uint32_t inputCount, const uint32_t*
// Makes a copy of the index list, validates the arguments, and changes
// the lifetime info of the corresponding operand.
auto setArguments = [&](std::vector<uint32_t>* indexVector, uint32_t indexCount,
- const uint32_t* indexList, OperandLifeTime lifetime) -> bool {
+ const uint32_t* indexList, Operand::LifeTime lifetime) -> bool {
indexVector->resize(indexCount);
for (uint32_t i = 0; i < indexCount; i++) {
const uint32_t operandIndex = indexList[i];
@@ -451,7 +445,7 @@ int ModelBuilder::identifyInputsAndOutputs(uint32_t inputCount, const uint32_t*
}
(*indexVector)[i] = operandIndex;
Operand& operand = mOperands[operandIndex];
- if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE) {
+ if (operand.lifetime != Operand::LifeTime::TEMPORARY_VARIABLE) {
LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set operand "
<< operandIndex
<< " to be an input or output. Check that it's not a constant or "
@@ -463,8 +457,8 @@ int ModelBuilder::identifyInputsAndOutputs(uint32_t inputCount, const uint32_t*
return true;
};
- if (!setArguments(&mInputIndexes, inputCount, inputs, OperandLifeTime::SUBGRAPH_INPUT) ||
- !setArguments(&mOutputIndexes, outputCount, outputs, OperandLifeTime::SUBGRAPH_OUTPUT)) {
+ if (!setArguments(&mInputIndexes, inputCount, inputs, Operand::LifeTime::SUBGRAPH_INPUT) ||
+ !setArguments(&mOutputIndexes, outputCount, outputs, Operand::LifeTime::SUBGRAPH_OUTPUT)) {
return ANEURALNETWORKS_BAD_DATA;
}
@@ -523,8 +517,8 @@ int ModelBuilder::finish() {
// NOTE: Must copyLargeValuesToSharedMemory() before validation; otherwise,
// a CONSTANT_REFERENCE operand will not have correct .poolIndex, and
// validation will not work properly.
- const Model modelForValidation = makeHidlModel();
- if (!validateModel(modelForValidation, ValidationMode::RUNTIME)) {
+ const Model modelForValidation = makeModel();
+ if (!validateModel(convertToV1_3(modelForValidation), ValidationMode::RUNTIME)) {
LOG(ERROR) << "ANeuralNetworksModel_finish called on invalid model";
mInvalidModel = true;
return ANEURALNETWORKS_BAD_DATA;
@@ -542,12 +536,12 @@ int ModelBuilder::finish() {
static void logRemoval(const Operation& operation, uint32_t count,
const std::vector<Operand>& operands) {
std::ostringstream message;
- message << "Operation " << toString(operation.type) << " with inputs {";
+ message << "Operation " << operation.type << " with inputs {";
for (uint32_t i = 0; i < operation.inputs.size(); ++i) {
if (i != 0) {
message << ", ";
}
- message << toString(operands[operation.inputs[i]].type);
+ message << operands[operation.inputs[i]].type;
}
message << "} has trailing optional inputs set to default values. Removing " << count
<< " trailing inputs.";
@@ -566,9 +560,6 @@ void ModelBuilder::removeTrailingArgumentsWithDefaultValues() {
const uint32_t inputCount = operation.inputs.size();
CHECK_LT(count, inputCount);
const uint32_t newInputCount = inputCount - count;
- for (uint32_t i = newInputCount; i < inputCount; ++i) {
- --mOperands[operation.inputs[i]].numberOfConsumers;
- }
operation.inputs.resize(newInputCount);
}
}
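Annotation: both consumer-count updates vanish in this patch because the canonical Operand no longer carries a numberOfConsumers field; consumers are counted on demand where needed. A sketch of on-demand counting (illustrative names; the real helper lives with the canonical type utilities):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Operation {
    std::vector<uint32_t> inputs;
};

// Count consumers per operand by walking every operation's input list.
std::vector<uint32_t> countConsumers(std::size_t numOperands,
                                     const std::vector<Operation>& operations) {
    std::vector<uint32_t> counts(numOperands, 0);
    for (const Operation& operation : operations) {
        for (uint32_t operandIndex : operation.inputs) {
            ++counts[operandIndex];
        }
    }
    return counts;
}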
@@ -583,12 +574,16 @@ enum class TailSpec {
// See countMatchingTrailingArguments().
static bool matchesSpec(TailSpec spec, const Operand& operand,
const std::vector<uint8_t>& mSmallOperandValues) {
- if (operand.lifetime != OperandLifeTime::CONSTANT_COPY) {
+ const void* valuePtr = nullptr;
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
+ valuePtr = static_cast<const void*>(&mSmallOperandValues[operand.location.offset]);
+ } else if (operand.lifetime == Operand::LifeTime::POINTER) {
+ valuePtr = std::get<const void*>(operand.location.pointer);
+ } else {
// CONSTANT_REFERENCE operands are not supported to avoid mapping memory
// during compilation.
return false;
}
- auto valuePtr = static_cast<const void*>(&mSmallOperandValues[operand.location.offset]);
switch (spec) {
case TailSpec::BOOL_FALSE:
return operand.type == OperandType::BOOL &&
@@ -818,8 +813,8 @@ bool ModelBuilder::sortIntoRunOrder() {
count = 0;
for (uint32_t operandIndex : mOperations[operationIndex].inputs) {
auto lifetime = mOperands[operandIndex].lifetime;
- if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
- lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE ||
+ lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) {
count++;
operandToOperations.insert(
std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
@@ -865,38 +860,38 @@ bool ModelBuilder::sortIntoRunOrder() {
return true;
}
-// A helper class to simplify state management when creating a HIDL model.
-class ModelBuilder::HidlModelMaker {
+// A helper class to simplify state management when creating a Model.
+class ModelBuilder::ModelMaker {
public:
static Model run(const ModelBuilder* model);
private:
- static Subgraph makeSubgraph(const ModelBuilder* model);
- HidlModelMaker() {}
- Model makeHidlModel(const ModelBuilder* mainModel);
+ static Model::Subgraph makeSubgraph(const ModelBuilder* model);
+ ModelMaker() {}
+ Model makeModel(const ModelBuilder* mainModel);
uint32_t addSubgraph(const ModelBuilder* refModel);
- void updateOperandLocations(const ModelBuilder* refModel, Subgraph* subgraph);
+ void updateOperandLocations(const ModelBuilder* refModel, Model::Subgraph* subgraph);
void addExtensions(const ModelBuilder* model);
void addExtensionWithPrefix(uint16_t prefix);
- std::vector<Subgraph> mRefSubgraphs;
- std::vector<uint8_t> mOperandValues;
+ std::vector<Model::Subgraph> mRefSubgraphs;
+ Model::OperandValues mOperandValues;
MemoryTracker mMemories;
- std::vector<ExtensionNameAndPrefix> mExtensionNameToPrefix;
+ std::vector<Model::ExtensionNameAndPrefix> mExtensionNameToPrefix;
std::set<uint16_t> mPrefixSet;
};
-Model ModelBuilder::makeHidlModel() const {
- // TODO: Cache the HIDL model to speed up subsequent calls.
- return HidlModelMaker::run(this);
+Model ModelBuilder::makeModel() const {
+ // TODO: Cache the Model to speed up subsequent calls.
+ return ModelMaker::run(this);
}
-Model ModelBuilder::HidlModelMaker::run(const ModelBuilder* model) {
- // run() ensures the state of HidlModelMaker is destroyed after the call.
- return HidlModelMaker().makeHidlModel(model);
+Model ModelBuilder::ModelMaker::run(const ModelBuilder* model) {
+ // run() ensures the state of ModelMaker is destroyed after the call.
+ return ModelMaker().makeModel(model);
}
-Model ModelBuilder::HidlModelMaker::makeHidlModel(const ModelBuilder* mainModel) {
+Model ModelBuilder::ModelMaker::makeModel(const ModelBuilder* mainModel) {
addExtensions(mainModel);
Model model;
model.main = makeSubgraph(mainModel);
@@ -905,14 +900,14 @@ Model ModelBuilder::HidlModelMaker::makeHidlModel(const ModelBuilder* mainModel)
model.operandValues = std::move(mOperandValues);
model.pools.resize(mMemories.size());
std::transform(mMemories.begin(), mMemories.end(), model.pools.begin(),
- [](const Memory* m) { return m->getHidlMemory(); });
+ [](const RuntimeMemory* m) { return uncheckedConvert(m->getHidlMemory()); });
model.relaxComputationFloat32toFloat16 = mainModel->mRelaxComputationFloat32toFloat16;
model.extensionNameToPrefix = std::move(mExtensionNameToPrefix);
return model;
}
-Subgraph ModelBuilder::HidlModelMaker::makeSubgraph(const ModelBuilder* model) {
- Subgraph subgraph;
+Model::Subgraph ModelBuilder::ModelMaker::makeSubgraph(const ModelBuilder* model) {
+ Model::Subgraph subgraph;
subgraph.operands = model->mOperands;
subgraph.operations = model->mOperations;
subgraph.inputIndexes = model->mInputIndexes;
@@ -920,27 +915,22 @@ Subgraph ModelBuilder::HidlModelMaker::makeSubgraph(const ModelBuilder* model) {
return subgraph;
}
-void ModelBuilder::HidlModelMaker::updateOperandLocations(const ModelBuilder* refModel,
- Subgraph* subgraph) {
+void ModelBuilder::ModelMaker::updateOperandLocations(const ModelBuilder* refModel,
+ Model::Subgraph* subgraph) {
for (Operand& operand : subgraph->operands) {
- if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) {
+ if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) {
uint32_t valueLength = operand.location.length;
- uint32_t existingSize = mOperandValues.size();
- uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength);
uint32_t originalOffset = operand.location.offset;
- uint32_t offset = existingSize + extraBytes;
- mOperandValues.resize(offset + valueLength);
- memcpy(&mOperandValues[offset], &refModel->mSmallOperandValues[originalOffset],
- valueLength);
- operand.location.offset = offset;
- } else if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
+ operand.location = mOperandValues.append(&refModel->mSmallOperandValues[originalOffset],
+ valueLength);
+ } else if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE) {
uint32_t originalPoolIndex = operand.location.poolIndex;
operand.location.poolIndex = mMemories.add(refModel->mMemories[originalPoolIndex]);
}
}
// Do recursive calls at the end to improve locality of mOperandValues.
for (Operand& operand : subgraph->operands) {
- if (operand.lifetime == OperandLifeTime::SUBGRAPH) {
+ if (operand.lifetime == Operand::LifeTime::SUBGRAPH) {
uint32_t refModelIndex = operand.location.offset;
// TODO(b/147875885): Avoid creating duplicate refSubgraphs when
// a single refModel is referenced multiple times.
@@ -949,23 +939,22 @@ void ModelBuilder::HidlModelMaker::updateOperandLocations(const ModelBuilder* re
}
}
-uint32_t ModelBuilder::HidlModelMaker::addSubgraph(const ModelBuilder* refModel) {
+uint32_t ModelBuilder::ModelMaker::addSubgraph(const ModelBuilder* refModel) {
uint32_t index = mRefSubgraphs.size();
mRefSubgraphs.push_back(makeSubgraph(refModel));
updateOperandLocations(refModel, &mRefSubgraphs.back());
return index;
}
-void ModelBuilder::HidlModelMaker::addExtensions(const ModelBuilder* model) {
- constexpr uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
+void ModelBuilder::ModelMaker::addExtensions(const ModelBuilder* model) {
for (const auto& operand : model->mOperands) {
- if (isExtensionOperandType(operand.type)) {
- addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kLowBitsType);
+ if (isExtension(operand.type)) {
+ addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kExtensionTypeBits);
}
}
for (const auto& operation : model->mOperations) {
- if (isExtensionOperationType(operation.type)) {
- addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kLowBitsType);
+ if (isExtension(operation.type)) {
+ addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kExtensionTypeBits);
}
}
for (const auto& refModel : model->mReferencedModels) {
@@ -973,7 +962,7 @@ void ModelBuilder::HidlModelMaker::addExtensions(const ModelBuilder* model) {
}
}
-void ModelBuilder::HidlModelMaker::addExtensionWithPrefix(uint16_t prefix) {
+void ModelBuilder::ModelMaker::addExtensionWithPrefix(uint16_t prefix) {
if (!mPrefixSet.insert(prefix).second) {
return;
}
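
addExtensionWithPrefix() above relies on std::set::insert() returning a pair whose .second member is false for duplicates, so each prefix is recorded exactly once. A self-contained sketch of the idiom (the registry type and field names are hypothetical):

    #include <cstdint>
    #include <set>
    #include <vector>

    struct ExtensionRegistry {
        std::set<uint16_t> prefixSet;
        std::vector<uint16_t> orderedPrefixes;  // first-seen order, duplicates skipped

        void addPrefix(uint16_t prefix) {
            // insert() returns {iterator, bool}; the bool is false if the
            // prefix was already present, in which case we bail out early.
            if (!prefixSet.insert(prefix).second) {
                return;
            }
            orderedPrefixes.push_back(prefix);
        }
    };
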
diff --git a/nn/runtime/ModelBuilder.h b/nn/runtime/ModelBuilder.h
index 2de68b392..9dd93ffc6 100644
--- a/nn/runtime/ModelBuilder.h
+++ b/nn/runtime/ModelBuilder.h
@@ -23,7 +23,6 @@
#include <memory>
#include <vector>
-#include "HalInterfaces.h"
#include "Memory.h"
#include "NeuralNetworks.h"
#include "Utils.h"
@@ -34,7 +33,7 @@ namespace nn {
class CompilationBuilder;
class Device;
class ExecutionPlan;
-class Memory;
+class RuntimeMemory;
class ModelBuilder {
public:
@@ -44,7 +43,7 @@ class ModelBuilder {
// Adds an operand to the model.
int addOperand(const ANeuralNetworksOperandType& type);
int setOperandValue(uint32_t index, const void* buffer, size_t length);
- int setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
+ int setOperandValueFromMemory(uint32_t index, const RuntimeMemory* memory, uint32_t offset,
size_t length);
int setOperandValueFromModel(uint32_t index, const ModelBuilder* value);
int setOperandSymmPerChannelQuantParams(
@@ -72,7 +71,7 @@ class ModelBuilder {
const std::vector<std::shared_ptr<Device>>& devices,
bool explicitDeviceList = false);
- hal::Model makeHidlModel() const;
+ Model makeModel() const;
uint32_t operandCount() const {
// We don't allow more than uint32_t worth of operands
@@ -89,7 +88,7 @@ class ModelBuilder {
return mInputIndexes[i];
}
const std::vector<uint32_t>& getInputOperandIndexes() const { return mInputIndexes; }
- const hal::Operand& getInputOperand(uint32_t i) const {
+ const Operand& getInputOperand(uint32_t i) const {
uint32_t index = getInputOperandIndex(i);
CHECK_LT(index, mOperands.size());
return mOperands[index];
@@ -99,15 +98,15 @@ class ModelBuilder {
return mOutputIndexes[i];
}
const std::vector<uint32_t>& getOutputOperandIndexes() const { return mOutputIndexes; }
- const hal::Operand& getOutputOperand(uint32_t i) const {
+ const Operand& getOutputOperand(uint32_t i) const {
uint32_t index = getOutputOperandIndex(i);
CHECK_LT(index, mOperands.size());
return mOperands[index];
}
- const hal::Operand& getOperand(uint32_t index) const { return mOperands[index]; }
- const hal::Operation& getOperation(uint32_t index) const { return mOperations[index]; }
+ const Operand& getOperand(uint32_t index) const { return mOperands[index]; }
+ const Operation& getOperation(uint32_t index) const { return mOperations[index]; }
const MemoryTracker& getMemories() const { return mMemories; }
- const std::vector<hal::Operation>& getOperations() const { return mOperations; }
+ const std::vector<Operation>& getOperations() const { return mOperations; }
const std::vector<uint32_t>& getSortedOperationMapping() const {
return mSortedOperationIndexMap;
}
@@ -121,8 +120,8 @@ class ModelBuilder {
CHECK_LT(i, mReferencedModels.size());
return mReferencedModels[i];
}
- const ModelBuilder* getReferencedModel(const hal::Operand& operand) const {
- CHECK(operand.lifetime == hal::OperandLifeTime::SUBGRAPH);
+ const ModelBuilder* getReferencedModel(const Operand& operand) const {
+ CHECK(operand.lifetime == Operand::LifeTime::SUBGRAPH);
return getReferencedModel(operand.location.offset);
}
@@ -174,7 +173,7 @@ class ModelBuilder {
// optional arguments are set to default values. This transformation enables
// more drivers to execute the model. See http://b/147105700.
void removeTrailingArgumentsWithDefaultValues();
- uint32_t getNumTrailingArgumentsToRemove(const hal::Operation& operation) const;
+ uint32_t getNumTrailingArgumentsToRemove(const Operation& operation) const;
// Sorts the operations to be in the correct order for single threaded
// node-at-a-time execution.
@@ -184,7 +183,7 @@ class ModelBuilder {
int copyLargeValuesToSharedMemory();
// The operations of the graph.
- std::vector<hal::Operation> mOperations;
+ std::vector<Operation> mOperations;
// The mapping from sorted index to the original index of operations in mOperations.
// mSortedOperationIndexMap is empty before sortIntoRunOrder() is called.
std::vector<uint32_t> mSortedOperationIndexMap;
@@ -193,7 +192,7 @@ class ModelBuilder {
// Is at least one of those operations an extension operation?
bool mHasExtensionOperation = false;
// The description of the operands of the graph.
- std::vector<hal::Operand> mOperands;
+ std::vector<Operand> mOperands;
// Is at least one of those operands an OEM operand?
bool mHasOEMOperand = false;
// The indexes of input operands of the model.
@@ -233,7 +232,7 @@ class ModelBuilder {
// Models referenced by operands in this model.
std::vector<const ModelBuilder*> mReferencedModels;
- class HidlModelMaker;
+ class ModelMaker;
};
} // namespace nn
diff --git a/nn/runtime/NeuralNetworks.cpp b/nn/runtime/NeuralNetworks.cpp
index f5206c866..0ccb6468c 100644
--- a/nn/runtime/NeuralNetworks.cpp
+++ b/nn/runtime/NeuralNetworks.cpp
@@ -22,6 +22,7 @@
#include "NeuralNetworks.h"
+#include <nnapi/Types.h>
#include <vndk/hardware_buffer.h>
#include <algorithm>
@@ -35,7 +36,6 @@
#include "ControlFlow.h"
#include "Event.h"
#include "ExecutionBuilder.h"
-#include "HalInterfaces.h"
#include "Manager.h"
#include "Memory.h"
#include "MetaModel.h"
@@ -46,7 +46,7 @@
#include "Utils.h"
using namespace android::nn;
-using namespace android::nn::hal;
+using android::sp;
// Make sure the constants defined in the header files have not changed values.
// IMPORTANT: When adding new values, update kNumberOfDataTypes or kNumberOfDataTypesOEM
@@ -558,12 +558,14 @@ static_assert(static_cast<int32_t>(DeviceType::ACCELERATOR) == ANEURALNETWORKS_D
// Make sure that the constants are compatible with the values defined in
// hardware/interfaces/neuralnetworks/1.3/types.hal.
-static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_LOW) == Priority::LOW,
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_LOW) ==
+ Priority::LOW,
"ANEURALNETWORKS_PRIORITY_LOW does not map to Priority::LOW");
-static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) ==
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) ==
Priority::MEDIUM,
"ANEURALNETWORKS_PRIORITY_MEDIUM does not map to Priority::MEDIUM");
-static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_HIGH) == Priority::HIGH,
+static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_HIGH) ==
+ Priority::HIGH,
"ANEURALNETWORKS_PRIORITY_HIGH does not map to Priority::HIGH");
// Asserts for ANeuralNetworksOperandType memory layout
@@ -597,9 +599,8 @@ static_assert(alignof(ANeuralNetworksSymmPerChannelQuantParams) == alignof(void*
// Asserts for compilation caching
static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == 32,
"ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN has changed");
-static_assert(static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) ==
- ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN,
- "Constant::BYTE_SIZE_OF_CACHE_TOKEN != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN");
+static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == kByteSizeOfCacheToken,
+ "ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN != kByteSizeOfCacheToken");
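
The replacement above keeps the cache-token consistency check purely compile-time. A sketch of the pattern, with hypothetical constants standing in for the NDK macro and the canonical constant: if the two values ever diverge, the build fails rather than the runtime misbehaving.

    #include <cstdint>

    // Hypothetical stand-ins for ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN and
    // the canonical kByteSizeOfCacheToken.
    constexpr uint32_t kPublicCacheTokenSize = 32;
    constexpr uint32_t kCanonicalCacheTokenSize = 32;

    static_assert(kPublicCacheTokenSize == kCanonicalCacheTokenSize,
                  "public and canonical cache token sizes have diverged");
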
// Asserts for compilation priority
static_assert(ANEURALNETWORKS_PRIORITY_LOW == 90, "ANEURALNETWORKS_PRIORITY_LOW has changed");
@@ -609,14 +610,6 @@ static_assert(ANEURALNETWORKS_PRIORITY_HIGH == 110, "ANEURALNETWORKS_PRIORITY_HI
static_assert(ANEURALNETWORKS_PRIORITY_DEFAULT == ANEURALNETWORKS_PRIORITY_MEDIUM,
"ANEURALNETWORKS_PRIORITY_DEFAULT has changed");
-// Asserts for loop timeout duration
-static_assert(static_cast<uint64_t>(LoopTimeoutDurationNs::DEFAULT) ==
- operation_while::kTimeoutNsDefault,
- "LoopTimeoutDurationNs::DEFAULT != operation_while::kTimeoutNsDefault");
-static_assert(static_cast<uint64_t>(LoopTimeoutDurationNs::MAXIMUM) ==
- operation_while::kTimeoutNsMaximum,
- "LoopTimeoutDurationNs::MAXIMUM != operation_while::kTimeoutNsMaximum");
-
int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) {
if (numDevices == nullptr) {
LOG(ERROR) << "ANeuralNetworks_getDeviceCount passed a nullptr";
@@ -718,7 +711,7 @@ int ANeuralNetworksModel_getSupportedOperationsForDevices(
return ANEURALNETWORKS_BAD_STATE;
}
- const Model hidlModel = m->makeHidlModel();
+ const Model canonicalModel = m->makeModel();
const std::vector<uint32_t>& opMap = m->getSortedOperationMapping();
// init the output array to false for all the operations.
std::fill(supportedOps, supportedOps + opMap.size(), false);
@@ -737,7 +730,7 @@ int ANeuralNetworksModel_getSupportedOperationsForDevices(
}
Device* d = reinterpret_cast<Device*>(const_cast<ANeuralNetworksDevice*>(devices[i]));
- const MetaModel metaModel(hidlModel, DeviceManager::get()->strictSlicing());
+ const MetaModel metaModel(canonicalModel, DeviceManager::get()->strictSlicing());
const std::vector<bool> supportsByDevice = d->getSupportedOperations(metaModel);
for (uint32_t j = 0; j < supportsByDevice.size(); j++) {
uint32_t originalIdx = opMap[j];
@@ -988,9 +981,9 @@ int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNe
LOG(ERROR) << "ANeuralNetworksMemory_copy passed a nullptr";
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- const Memory* s = reinterpret_cast<const Memory*>(src);
- const Memory* d = reinterpret_cast<const Memory*>(dst);
- return Memory::copy(*s, *d);
+ const RuntimeMemory* s = reinterpret_cast<const RuntimeMemory*>(src);
+ const RuntimeMemory* d = reinterpret_cast<const RuntimeMemory*>(dst);
+ return RuntimeMemory::copy(*s, *d);
}
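
ANeuralNetworksMemory_copy() above shows the runtime's opaque-handle convention, which the rename to RuntimeMemory leaves intact: the public C type is opaque, and the C++ implementation type is recovered with reinterpret_cast at the API boundary. A minimal sketch of that boundary (both types here are hypothetical stand-ins):

    struct OpaqueHandle;  // opaque type exposed through the C API

    class RuntimeObject {  // internal C++ implementation type
      public:
        int copyTo(RuntimeObject* /*dst*/) const { return 0; }
    };

    // C-style entry point: recover the implementation type from the handle.
    // Valid because the handle always points at a real RuntimeObject.
    int opaque_copy(const OpaqueHandle* src, OpaqueHandle* dst) {
        const auto* s = reinterpret_cast<const RuntimeObject*>(src);
        auto* d = reinterpret_cast<RuntimeObject*>(dst);
        return s->copyTo(d);
    }
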
int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd, size_t offset,
@@ -1024,7 +1017,7 @@ int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb,
void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksMemory_free");
// No validation. Free of nullptr is valid.
- Memory* m = reinterpret_cast<Memory*>(memory);
+ RuntimeMemory* m = reinterpret_cast<RuntimeMemory*>(memory);
delete m;
}
@@ -1091,7 +1084,7 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model,
LOG(ERROR) << "ANeuralNetworksModel_setOperandValue passed a nullptr";
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- const Memory* mem = reinterpret_cast<const Memory*>(memory);
+ const RuntimeMemory* mem = reinterpret_cast<const RuntimeMemory*>(memory);
ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model);
return m->setOperandValueFromMemory(index, mem, offset, length);
}
@@ -1302,7 +1295,7 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execut
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- const Memory* m = reinterpret_cast<const Memory*>(memory);
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory);
ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
return r->setInputFromMemory(index, type, m, offset, length);
}
@@ -1330,7 +1323,7 @@ int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execu
}
ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
- const Memory* m = reinterpret_cast<const Memory*>(memory);
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory);
return r->setOutputFromMemory(index, type, m, offset, length);
}
diff --git a/nn/runtime/TypeManager.cpp b/nn/runtime/TypeManager.cpp
index 03fac20ac..932dcc741 100644
--- a/nn/runtime/TypeManager.cpp
+++ b/nn/runtime/TypeManager.cpp
@@ -48,11 +48,7 @@ inline bool StartsWith(std::string_view sv, std::string_view prefix) {
namespace {
-using namespace hal;
-
-const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
-const uint32_t kMaxPrefix =
- (1 << static_cast<uint8_t>(ExtensionTypeEncoding::HIGH_BITS_PREFIX)) - 1;
+constexpr uint32_t kMaxPrefix = (1 << kExtensionPrefixBits) - 1;
// Checks if the two structures contain the same information. The order of
// operand types within the structures does not matter.
@@ -235,7 +231,7 @@ bool TypeManager::getExtensionType(const char* extensionName, uint16_t typeWithi
int32_t* type) {
uint16_t prefix;
NN_RET_CHECK(getExtensionPrefix(extensionName, &prefix));
- *type = (prefix << kLowBitsType) | typeWithinExtension;
+ *type = (prefix << kExtensionTypeBits) | typeWithinExtension;
return true;
}
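
getExtensionType() above packs an extension prefix and a type-within-extension into one 32-bit operand type; getExtensionOperandTypeInfo() below unpacks it. A sketch of the bit layout, assuming the usual 16-bit-prefix / 16-bit-type split behind kExtensionTypeBits:

    #include <cstdint>

    constexpr uint32_t kExtTypeBits = 16;  // stand-in for kExtensionTypeBits
    constexpr uint32_t kTypeWithinExtensionMask = (1u << kExtTypeBits) - 1;

    constexpr int32_t makeExtensionType(uint16_t prefix, uint16_t typeWithinExtension) {
        return static_cast<int32_t>(
                (static_cast<uint32_t>(prefix) << kExtTypeBits) | typeWithinExtension);
    }

    constexpr uint16_t extensionPrefix(uint32_t operandType) {
        return static_cast<uint16_t>(operandType >> kExtTypeBits);
    }

    constexpr uint16_t typeWithinExtension(uint32_t operandType) {
        return static_cast<uint16_t>(operandType & kTypeWithinExtensionMask);
    }

    // Round-trip checks: encode then decode recovers both halves.
    static_assert(extensionPrefix(makeExtensionType(0x0001, 0x0002)) == 0x0001);
    static_assert(typeWithinExtension(makeExtensionType(0x0001, 0x0002)) == 0x0002);
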
@@ -249,8 +245,8 @@ bool TypeManager::getExtensionInfo(uint16_t prefix, const Extension** extension)
bool TypeManager::getExtensionOperandTypeInfo(
OperandType type, const Extension::OperandTypeInformation** info) const {
uint32_t operandType = static_cast<uint32_t>(type);
- uint16_t prefix = operandType >> kLowBitsType;
- uint16_t typeWithinExtension = operandType & ((1 << kLowBitsType) - 1);
+ uint16_t prefix = operandType >> kExtensionTypeBits;
+ uint16_t typeWithinExtension = operandType & ((1 << kExtensionTypeBits) - 1);
const Extension* extension;
NN_RET_CHECK(getExtensionInfo(prefix, &extension))
<< "Cannot find extension corresponding to prefix " << prefix;
@@ -268,7 +264,7 @@ bool TypeManager::getExtensionOperandTypeInfo(
}
bool TypeManager::isTensorType(OperandType type) const {
- if (!isExtensionOperandType(type)) {
+ if (!isExtension(type)) {
return !nonExtensionOperandTypeIsScalar(static_cast<int>(type));
}
const Extension::OperandTypeInformation* info;
@@ -278,7 +274,7 @@ bool TypeManager::isTensorType(OperandType type) const {
uint32_t TypeManager::getSizeOfData(OperandType type,
const std::vector<uint32_t>& dimensions) const {
- if (!isExtensionOperandType(type)) {
+ if (!isExtension(type)) {
return nonExtensionOperandSizeOfData(type, dimensions);
}
const Extension::OperandTypeInformation* info;
@@ -286,9 +282,9 @@ uint32_t TypeManager::getSizeOfData(OperandType type,
return info->isTensor ? sizeOfTensorData(info->byteSize, dimensions) : info->byteSize;
}
-bool TypeManager::sizeOfDataOverflowsUInt32(hal::OperandType type,
+bool TypeManager::sizeOfDataOverflowsUInt32(OperandType type,
const std::vector<uint32_t>& dimensions) const {
- if (!isExtensionOperandType(type)) {
+ if (!isExtension(type)) {
return nonExtensionOperandSizeOfDataOverflowsUInt32(type, dimensions);
}
const Extension::OperandTypeInformation* info;
diff --git a/nn/runtime/TypeManager.h b/nn/runtime/TypeManager.h
index a06ddb6ee..5236ba7f0 100644
--- a/nn/runtime/TypeManager.h
+++ b/nn/runtime/TypeManager.h
@@ -48,18 +48,18 @@ class TypeManager {
// Looks up information about the extension corresponding to the given prefix
//
// Returns false if no extension corresponds to the given prefix.
- bool getExtensionInfo(uint16_t prefix, const hal::Extension** extension) const;
+ bool getExtensionInfo(uint16_t prefix, const Extension** extension) const;
// Looks up information about an extension operand type
//
// Returns false if the extension or type is unknown.
- bool getExtensionOperandTypeInfo(hal::OperandType type,
- const hal::Extension::OperandTypeInformation** info) const;
+ bool getExtensionOperandTypeInfo(OperandType type,
+ const Extension::OperandTypeInformation** info) const;
// Returns true if an operand type is a tensor type.
//
// Aborts if the type is an unknown extension type.
- bool isTensorType(hal::OperandType type) const;
+ bool isTensorType(OperandType type) const;
// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
@@ -67,7 +67,7 @@ class TypeManager {
//
// Aborts if the type is an unknown extension type.
// Aborts if the size would overflow the return type.
- uint32_t getSizeOfData(const hal::Operand& operand) const {
+ uint32_t getSizeOfData(const Operand& operand) const {
return getSizeOfData(operand.type, operand.dimensions);
}
@@ -76,14 +76,13 @@ class TypeManager {
// unspecified dimension, returns zero.
//
// Aborts if the type is an unknown extension type.
- uint32_t getSizeOfData(hal::OperandType type, const std::vector<uint32_t>& dimensions) const;
+ uint32_t getSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) const;
// Returns true if the amount of space needed to store a value of the specified
// dimensions and element size overflows the uint32_t type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32().
- bool sizeOfDataOverflowsUInt32(hal::OperandType type,
- const std::vector<uint32_t>& dimensions) const;
+ bool sizeOfDataOverflowsUInt32(OperandType type, const std::vector<uint32_t>& dimensions) const;
// Returns true if extensions usage is allowed in current process.
bool areExtensionsAllowed() const { return mExtensionsAllowed; }
@@ -93,7 +92,7 @@ class TypeManager {
// Registers an extension.
//
// Returns true if the registration was successful.
- bool forTest_registerExtension(const hal::Extension& extension) {
+ bool forTest_registerExtension(const Extension& extension) {
return registerExtension(extension, "INTERNAL TEST");
}
@@ -135,7 +134,7 @@ class TypeManager {
private:
TypeManager();
void findAvailableExtensions();
- bool registerExtension(hal::Extension extension, const std::string& deviceName);
+ bool registerExtension(Extension extension, const std::string& deviceName);
// Returns the numeric "prefix" value corresponding to an extension.
//
@@ -145,7 +144,7 @@ class TypeManager {
const DeviceManager* mDeviceManager = DeviceManager::get();
// Contains all registered extensions.
- std::map<std::string, hal::Extension> mExtensionNameToExtension;
+ std::map<std::string, Extension> mExtensionNameToExtension;
// Contains the name of the first discovered device that supports an
// extension. Used for error reporting.
@@ -160,7 +159,7 @@ class TypeManager {
std::map<std::string, uint16_t> mExtensionNameToPrefix;
// Entries of mPrefixToExtension point into mExtensionNameToExtension.
// prefix=0 corresponds to no extension and should never be looked up.
- std::vector<hal::Extension*> mPrefixToExtension = {nullptr};
+ std::vector<Extension*> mPrefixToExtension = {nullptr};
// True if Extensions can be used in current process.
bool mExtensionsAllowed = false;
diff --git a/nn/runtime/VersionedInterfaces.cpp b/nn/runtime/VersionedInterfaces.cpp
index ccb29dc22..fce558c38 100644
--- a/nn/runtime/VersionedInterfaces.cpp
+++ b/nn/runtime/VersionedInterfaces.cpp
@@ -18,13 +18,12 @@
#include "VersionedInterfaces.h"
-#include <fcntl.h>
-
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/scopeguard.h>
#include <android-base/thread_annotations.h>
#include <cutils/native_handle.h>
+#include <fcntl.h>
#include <algorithm>
#include <chrono>
@@ -104,18 +103,14 @@ namespace nn {
// anonymous namespace
namespace {
-using namespace hal;
-
-const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-
-void sendFailureMessage(IPreparedModelCallback* cb) {
+void sendFailureMessage(V1_3::IPreparedModelCallback* cb) {
CHECK(cb != nullptr);
- cb->notify_1_3(ErrorStatus::GENERAL_FAILURE, nullptr);
+ cb->notify_1_3(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr);
}
// This class is thread safe
template <typename Callback>
-class DeathHandler : public hidl_death_recipient {
+class DeathHandler : public hardware::hidl_death_recipient {
public:
void serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) override {
LOG(ERROR) << "DeathHandler::serviceDied -- service unexpectedly died!";
@@ -164,7 +159,7 @@ static std::pair<int, std::shared_ptr<VersionedIPreparedModel>> makeVersionedIPr
// proactively handle service crashes. If the linkToDeath call fails,
// asynchronous calls are susceptible to hangs if the service crashes before
// providing the response.
- const Return<bool> ret = preparedModel->linkToDeath(deathHandler, 0);
+ const hardware::Return<bool> ret = preparedModel->linkToDeath(deathHandler, 0);
if (ret.isDeadObject()) {
LOG(ERROR) << "makeVersionedIPreparedModel failed to register a death recipient for the "
"IPreparedModel object because the IPreparedModel object is dead.";
@@ -206,10 +201,10 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
const auto failDeadObject = []() -> std::tuple<int, std::vector<OutputShape>, Timing> {
- return {ANEURALNETWORKS_DEAD_OBJECT, {}, kNoTiming};
+ return {ANEURALNETWORKS_DEAD_OBJECT, {}, {}};
};
- const auto failWithStatus = [](ErrorStatus status) {
- return getExecutionResult(status, {}, kNoTiming);
+ const auto failWithStatus = [](V1_3::ErrorStatus status) {
+ return getExecutionResult(status, {}, {});
};
const auto getResults = [failDeadObject](const ExecutionCallback& cb) {
if (cb.isDeadObject()) {
@@ -221,21 +216,23 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
const sp<ExecutionCallback> callback = new ExecutionCallback();
const auto scoped = mDeathHandler->protectCallback(callback);
- // version 1.3+ HAL
+ // version 1.3 HAL
+ const V1_3::Request request13 = convertToV1_3(request);
if (mPreparedModelV1_3 != nullptr) {
const auto otp = makeTimePoint(deadline);
- Return<ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(request, measure, otp,
- loopTimeoutDuration, callback);
+ hardware::Return<V1_3::ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(
+ request13, convertToV1_2(measure), convertToV1_3(otp),
+ convertToV1_3(loopTimeoutDuration), callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "execute_1_3 failure: " << ret.description();
return failDeadObject();
}
if (!ret.isOk()) {
LOG(ERROR) << "execute_1_3 failure: " << ret.description();
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
- if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "execute_1_3 returned " << toString(static_cast<ErrorStatus>(ret));
+ if (ret != V1_3::ErrorStatus::NONE) {
+ LOG(ERROR) << "execute_1_3 returned " << toString(static_cast<V1_3::ErrorStatus>(ret));
return failWithStatus(ret);
}
callback->wait();
@@ -244,21 +241,21 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
// version 1.2 HAL
if (mPreparedModelV1_2 != nullptr) {
- const bool compliant = compliantWithV1_2(request);
+ const bool compliant = compliantWithV1_2(request13);
if (!compliant) {
LOG(ERROR) << "Could not handle execute_1_2!";
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
- const V1_0::Request request12 = convertToV1_2(request);
- Return<V1_0::ErrorStatus> ret =
- mPreparedModelV1_2->execute_1_2(request12, measure, callback);
+ const V1_0::Request request12 = convertToV1_2(request13);
+ hardware::Return<V1_0::ErrorStatus> ret =
+ mPreparedModelV1_2->execute_1_2(request12, convertToV1_2(measure), callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "execute_1_2 failure: " << ret.description();
return failDeadObject();
}
if (!ret.isOk()) {
LOG(ERROR) << "execute_1_2 failure: " << ret.description();
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
if (status != V1_0::ErrorStatus::NONE) {
@@ -271,20 +268,20 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
// version 1.0 HAL
if (mPreparedModelV1_0 != nullptr) {
- const bool compliant = compliantWithV1_0(request);
+ const bool compliant = compliantWithV1_0(request13);
if (!compliant) {
LOG(ERROR) << "Could not handle execute!";
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
- const V1_0::Request request10 = convertToV1_0(request);
- Return<V1_0::ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback);
+ const V1_0::Request request10 = convertToV1_0(request13);
+ hardware::Return<V1_0::ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "execute failure: " << ret.description();
return failDeadObject();
}
if (!ret.isOk()) {
LOG(ERROR) << "execute failure: " << ret.description();
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
if (status != V1_0::ErrorStatus::NONE) {
@@ -297,24 +294,27 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
// No prepared model available
LOG(ERROR) << "executeAsynchronously called with no preparedModel";
- return failWithStatus(ErrorStatus::GENERAL_FAILURE);
+ return failWithStatus(V1_3::ErrorStatus::GENERAL_FAILURE);
}
std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::executeSynchronously(
const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration) const {
const std::tuple<int, std::vector<OutputShape>, Timing> kDeadObject = {
- ANEURALNETWORKS_DEAD_OBJECT, {}, kNoTiming};
- const auto kFailure = getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ ANEURALNETWORKS_DEAD_OBJECT, {}, {}};
+ const auto kFailure = getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, {});
- // version 1.3+ HAL
+ // version 1.3 HAL
+ const V1_3::Request request13 = convertToV1_3(request);
if (mPreparedModelV1_3 != nullptr) {
std::tuple<int, std::vector<OutputShape>, Timing> result;
const auto otp = makeTimePoint(deadline);
- Return<void> ret = mPreparedModelV1_3->executeSynchronously_1_3(
- request, measure, otp, loopTimeoutDuration,
- [&result](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
+ hardware::Return<void> ret = mPreparedModelV1_3->executeSynchronously_1_3(
+ request13, convertToV1_2(measure), convertToV1_3(otp),
+ convertToV1_3(loopTimeoutDuration),
+ [&result](V1_3::ErrorStatus error,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
result = getExecutionResult(error, outputShapes, timing);
});
if (ret.isDeadObject()) {
@@ -330,18 +330,19 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
// version 1.2 HAL
if (mPreparedModelV1_2 != nullptr) {
- const bool compliant = compliantWithV1_2(request);
+ const bool compliant = compliantWithV1_2(request13);
if (!compliant) {
LOG(ERROR) << "Could not handle executeSynchronously!";
return kFailure;
}
- const V1_0::Request request12 = convertToV1_2(request);
+ const V1_0::Request request12 = convertToV1_2(request13);
std::tuple<int, std::vector<OutputShape>, Timing> result;
- Return<void> ret = mPreparedModelV1_2->executeSynchronously(
- request12, measure,
- [&result](V1_0::ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
- const Timing& timing) {
+ hardware::Return<void> ret = mPreparedModelV1_2->executeSynchronously(
+ request12, convertToV1_2(measure),
+ [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
result = getExecutionResult(convertToV1_3(error), outputShapes, timing);
});
if (ret.isDeadObject()) {
@@ -363,11 +364,11 @@ std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execu
const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
const OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const {
if (preferSynchronous) {
- VLOG(EXECUTION) << "Before executeSynchronously() " << SHOW_IF_DEBUG(toString(request));
+ VLOG(EXECUTION) << "Before executeSynchronously() " << SHOW_IF_DEBUG(request);
return executeSynchronously(request, measure, deadline, loopTimeoutDuration);
}
- VLOG(EXECUTION) << "Before executeAsynchronously() " << SHOW_IF_DEBUG(toString(request));
+ VLOG(EXECUTION) << "Before executeAsynchronously() " << SHOW_IF_DEBUG(request);
return executeAsynchronously(request, measure, deadline, loopTimeoutDuration);
}
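
The execute paths above all follow one ladder: convert the canonical Request to the 1.3 HAL form once, then try the newest prepared-model interface first and fall back, downconverting only after a compliance check. A compressed sketch of that control flow (every type and converter here is a simplified stand-in, not the real convertToV1_* API):

    struct Request {};
    struct RequestV13 {};
    struct RequestV12 {};

    RequestV13 toV13(const Request&) { return {}; }
    bool compliantWithV12(const RequestV13&) { return true; }
    RequestV12 downToV12(const RequestV13&) { return {}; }

    struct PreparedModels {
        bool hasV13 = false;
        bool hasV12 = false;
    };

    // Returns 0 on success, -1 when no interface can run the request.
    int execute(const PreparedModels& models, const Request& request) {
        const RequestV13 request13 = toV13(request);  // convert once, up front
        if (models.hasV13) {
            // ... call execute_1_3(request13, ...) ...
            return 0;
        }
        if (models.hasV12) {
            if (!compliantWithV12(request13)) {
                return -1;  // request cannot be expressed in the older HAL
            }
            const RequestV12 request12 = downToV12(request13);
            // ... call execute_1_2(request12, ...) ...
            (void)request12;
            return 0;
        }
        return -1;  // no prepared model available
    }
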
@@ -397,13 +398,15 @@ std::shared_ptr<ExecutionBurstController> VersionedIPreparedModel::configureExec
return ExecutionBurstController::create(mPreparedModelV1_2, pollingTimeWindow);
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_3::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_3::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_3");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities_1_3(
- [&result](ErrorStatus error, const Capabilities& capabilities) {
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities_1_3(
+ [&result](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) {
result = std::make_pair(error, capabilities);
});
if (!ret.isOk()) {
@@ -413,38 +416,39 @@ static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_3::IDevic
return result;
}
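
getCapabilitiesFunction() above is one instance of a pattern repeated for the 1.2, 1.1, and 1.0 devices further down: seed the result with a failure value, pass a lambda that overwrites it, and keep the failure when the transport Return is not OK. A stand-alone sketch of the shape, with the hidl Return and device call mocked out:

    #include <functional>
    #include <utility>

    enum class Status { NONE, GENERAL_FAILURE };
    struct Capabilities {};

    struct Return {  // trimmed-down stand-in for hardware::Return<void>
        bool ok;
        bool isOk() const { return ok; }
    };

    // Stand-in for an IDevice method that reports results via a callback.
    Return getCapabilities(const std::function<void(Status, const Capabilities&)>& cb) {
        cb(Status::NONE, Capabilities{});
        return {true};
    }

    std::pair<Status, Capabilities> getCapabilitiesChecked() {
        const std::pair<Status, Capabilities> kFailure = {Status::GENERAL_FAILURE, {}};
        std::pair<Status, Capabilities> result = kFailure;
        const Return ret =
                getCapabilities([&result](Status error, const Capabilities& caps) {
                    result = std::make_pair(error, caps);
                });
        if (!ret.isOk()) {
            return kFailure;  // a transport error wins over whatever the lambda wrote
        }
        return result;
    }
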
-std::tuple<int, hal::hidl_handle, sp<hal::IFencedExecutionCallback>, hal::Timing>
-VersionedIPreparedModel::executeFenced(
- const hal::Request& request, const hal::hidl_vec<hal::hidl_handle>& waitFor,
- MeasureTiming measure, const std::optional<Deadline>& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) {
- // version 1.3+ HAL
- hal::hidl_handle syncFence;
- sp<hal::IFencedExecutionCallback> dispatchCallback;
- hal::Timing timing = {UINT64_MAX, UINT64_MAX};
+std::tuple<int, hardware::hidl_handle, sp<V1_3::IFencedExecutionCallback>, Timing>
+VersionedIPreparedModel::executeFenced(const Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ MeasureTiming measure,
+ const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence) {
+ // version 1.3 HAL
+ hardware::hidl_handle syncFence;
+ sp<V1_3::IFencedExecutionCallback> dispatchCallback;
+ Timing timing = {UINT64_MAX, UINT64_MAX};
if (mPreparedModelV1_3 != nullptr) {
ErrorStatus errorStatus;
const auto otp = makeTimePoint(deadline);
- Return<void> ret = mPreparedModelV1_3->executeFenced(
- request, waitFor, measure, otp, loopTimeoutDuration, timeoutDurationAfterFence,
+ hardware::Return<void> ret = mPreparedModelV1_3->executeFenced(
+ convertToV1_3(request), waitFor, convertToV1_2(measure), convertToV1_3(otp),
+ convertToV1_3(loopTimeoutDuration), convertToV1_3(timeoutDurationAfterFence),
[&syncFence, &errorStatus, &dispatchCallback](
- ErrorStatus error, const hidl_handle& handle,
- const sp<hal::IFencedExecutionCallback>& callback) {
+ V1_3::ErrorStatus error, const hardware::hidl_handle& handle,
+ const sp<V1_3::IFencedExecutionCallback>& callback) {
syncFence = handle;
- errorStatus = error;
+ errorStatus = uncheckedConvert(error);
dispatchCallback = callback;
});
if (!ret.isOk()) {
LOG(ERROR) << "executeFenced failure: " << ret.description();
- return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
if (errorStatus != ErrorStatus::NONE) {
- LOG(ERROR) << "executeFenced returned "
- << toString(static_cast<ErrorStatus>(errorStatus));
+ LOG(ERROR) << "executeFenced returned " << errorStatus;
return std::make_tuple(convertErrorStatusToResultCode(errorStatus),
- hal::hidl_handle(nullptr), nullptr, timing);
+ hardware::hidl_handle(nullptr), nullptr, timing);
}
return std::make_tuple(ANEURALNETWORKS_NO_ERROR, syncFence, dispatchCallback, timing);
}
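
The hunk that follows implements the fallback when no 1.3 driver handles sync fences: validate and wait on every fence fd, then run a plain synchronous execution. A sketch of that wait-then-execute shape, with syncWaitForever() as a hypothetical stand-in for the real syncWait(syncFd, -1):

    #include <vector>

    enum class FenceState { SIGNALED, ERROR };

    // Hypothetical stand-in for syncWait(syncFd, -1): block until signaled.
    FenceState syncWaitForever(int /*syncFd*/) { return FenceState::SIGNALED; }

    // Returns true only if every fd is a plausible sync fence and signals.
    bool waitForAllFences(const std::vector<int>& fenceFds) {
        for (int syncFd : fenceFds) {
            if (syncFd <= 0) {
                return false;  // bad data: not a valid fence fd
            }
            if (syncWaitForever(syncFd) != FenceState::SIGNALED) {
                return false;  // op failed: the fence ended in an error state
            }
        }
        return true;  // safe to fall back to a regular synchronous execution
    }
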
@@ -454,33 +458,35 @@ VersionedIPreparedModel::executeFenced(
LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution";
for (const auto& fenceHandle : waitFor) {
if (!fenceHandle.getNativeHandle()) {
- return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
int syncFd = fenceHandle.getNativeHandle()->data[0];
if (syncFd <= 0) {
- return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
auto r = syncWait(syncFd, -1);
if (r != FenceState::SIGNALED) {
LOG(ERROR) << "syncWait failed, fd: " << syncFd;
- return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hal::hidl_handle(nullptr), nullptr,
- timing);
+ return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr),
+ nullptr, timing);
}
}
int errorCode;
std::tie(errorCode, std::ignore, timing) =
executeSynchronously(request, measure, deadline, loopTimeoutDuration);
- return std::make_tuple(errorCode, hal::hidl_handle(nullptr), nullptr, timing);
+ return std::make_tuple(errorCode, hardware::hidl_handle(nullptr), nullptr, timing);
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_2::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_2::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_2");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities_1_2(
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities_1_2(
[&result](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities) {
result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
});
@@ -491,12 +497,14 @@ static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_2::IDevic
return result;
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_1::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_1::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_1");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities_1_1(
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities_1_1(
[&result](V1_0::ErrorStatus error, const V1_1::Capabilities& capabilities) {
// Time taken to convert capabilities is trivial
result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
@@ -508,12 +516,14 @@ static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_1::IDevic
return result;
}
-static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_0::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, V1_3::Capabilities> getCapabilitiesFunction(
+ V1_0::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities");
- const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, Capabilities> result = kFailure;
- const Return<void> ret = device->getCapabilities(
+ const std::pair<V1_3::ErrorStatus, V1_3::Capabilities> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, V1_3::Capabilities> result = kFailure;
+ const hardware::Return<void> ret = device->getCapabilities(
[&result](V1_0::ErrorStatus error, const V1_0::Capabilities& capabilities) {
// Time taken to convert capabilities is trivial
result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities));
@@ -525,14 +535,16 @@ static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_0::IDevic
return result;
}
-static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunction(
- V1_2::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>>
+getSupportedExtensionsFunction(V1_2::IDevice* device) {
CHECK(device != nullptr);
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getSupportedExtensions");
- const std::pair<ErrorStatus, hidl_vec<Extension>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, hidl_vec<Extension>> result = kFailure;
- const Return<void> ret = device->getSupportedExtensions(
- [&result](V1_0::ErrorStatus error, const hidl_vec<Extension>& extensions) {
+ const std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>> result = kFailure;
+ const hardware::Return<void> ret = device->getSupportedExtensions(
+ [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<V1_2::Extension>& extensions) {
result = std::make_pair(convertToV1_3(error), extensions);
});
if (!ret.isOk()) {
@@ -542,18 +554,18 @@ static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunctio
return result;
}
-static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunction(
- V1_0::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::Extension>>
+getSupportedExtensionsFunction(V1_0::IDevice* device) {
CHECK(device != nullptr);
- return {ErrorStatus::NONE, {/* No extensions. */}};
+ return {V1_3::ErrorStatus::NONE, {/* No extensions. */}};
}
static int32_t getTypeFunction(V1_2::IDevice* device) {
CHECK(device != nullptr);
constexpr int32_t kFailure = -1;
int32_t result = kFailure;
- const Return<void> ret =
- device->getType([&result](V1_0::ErrorStatus error, DeviceType deviceType) {
+ const hardware::Return<void> ret =
+ device->getType([&result](V1_0::ErrorStatus error, V1_2::DeviceType deviceType) {
if (error == V1_0::ErrorStatus::NONE) {
result = static_cast<int32_t>(deviceType);
}
@@ -570,12 +582,14 @@ static int32_t getTypeFunction(V1_0::IDevice* device) {
return ANEURALNETWORKS_DEVICE_UNKNOWN;
}
-static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_2::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_string> getVersionStringFunction(
+ V1_2::IDevice* device) {
CHECK(device != nullptr);
- const std::pair<ErrorStatus, hidl_string> kFailure = {ErrorStatus::GENERAL_FAILURE, ""};
- std::pair<ErrorStatus, hidl_string> result = kFailure;
- const Return<void> ret = device->getVersionString(
- [&result](V1_0::ErrorStatus error, const hidl_string& version) {
+ const std::pair<V1_3::ErrorStatus, hardware::hidl_string> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, ""};
+ std::pair<V1_3::ErrorStatus, hardware::hidl_string> result = kFailure;
+ const hardware::Return<void> ret = device->getVersionString(
+ [&result](V1_0::ErrorStatus error, const hardware::hidl_string& version) {
result = std::make_pair(convertToV1_3(error), version);
});
if (!ret.isOk()) {
@@ -585,18 +599,19 @@ static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_2::IDevic
return result;
}
-static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_0::IDevice* device) {
+static std::pair<V1_3::ErrorStatus, hardware::hidl_string> getVersionStringFunction(
+ V1_0::IDevice* device) {
CHECK(device != nullptr);
- return {ErrorStatus::NONE, "UNKNOWN"};
+ return {V1_3::ErrorStatus::NONE, "UNKNOWN"};
}
-static std::tuple<ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
+static std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
V1_2::IDevice* device) {
CHECK(device != nullptr);
- constexpr std::tuple<ErrorStatus, uint32_t, uint32_t> kFailure = {ErrorStatus::GENERAL_FAILURE,
- 0, 0};
- std::tuple<ErrorStatus, uint32_t, uint32_t> result = kFailure;
- const Return<void> ret = device->getNumberOfCacheFilesNeeded(
+ constexpr std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> kFailure = {
+ V1_3::ErrorStatus::GENERAL_FAILURE, 0, 0};
+ std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> result = kFailure;
+ const hardware::Return<void> ret = device->getNumberOfCacheFilesNeeded(
[&result](V1_0::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
result = {convertToV1_3(error), numModelCache, numDataCache};
});
@@ -607,17 +622,17 @@ static std::tuple<ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFu
return result;
}
-static std::tuple<ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
+static std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction(
V1_0::IDevice* device) {
CHECK(device != nullptr);
- return {ErrorStatus::NONE, 0, 0};
+ return {V1_3::ErrorStatus::NONE, 0, 0};
}
struct InitialData {
- hal::Capabilities capabilities;
- hal::hidl_vec<hal::Extension> supportedExtensions;
+ V1_3::Capabilities capabilities;
+ hardware::hidl_vec<V1_2::Extension> supportedExtensions;
int32_t type;
- hal::hidl_string versionString;
+ hardware::hidl_string versionString;
std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded;
};
@@ -626,7 +641,7 @@ static std::optional<InitialData> initializeFunction(Device* device) {
CHECK(device != nullptr);
auto [capabilitiesStatus, capabilities] = getCapabilitiesFunction(device);
- if (capabilitiesStatus != ErrorStatus::NONE) {
+ if (capabilitiesStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getCapabilities* returned the error "
<< toString(capabilitiesStatus);
return std::nullopt;
@@ -634,7 +649,7 @@ static std::optional<InitialData> initializeFunction(Device* device) {
VLOG(MANAGER) << "Capab " << toString(capabilities);
auto [versionStatus, versionString] = getVersionStringFunction(device);
- if (versionStatus != ErrorStatus::NONE) {
+ if (versionStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getVersionString returned the error " << toString(versionStatus);
return std::nullopt;
}
@@ -647,7 +662,7 @@ static std::optional<InitialData> initializeFunction(Device* device) {
}
auto [extensionsStatus, supportedExtensions] = getSupportedExtensionsFunction(device);
- if (extensionsStatus != ErrorStatus::NONE) {
+ if (extensionsStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getSupportedExtensions returned the error "
<< toString(extensionsStatus);
return std::nullopt;
@@ -655,7 +670,7 @@ static std::optional<InitialData> initializeFunction(Device* device) {
const auto [cacheFilesStatus, numModelCacheFiles, numDataCacheFiles] =
getNumberOfCacheFilesNeededFunction(device);
- if (cacheFilesStatus != ErrorStatus::NONE) {
+ if (cacheFilesStatus != V1_3::ErrorStatus::NONE) {
LOG(ERROR) << "IDevice::getNumberOfCacheFilesNeeded returned the error "
<< toString(cacheFilesStatus);
return std::nullopt;
@@ -663,7 +678,7 @@ static std::optional<InitialData> initializeFunction(Device* device) {
// The following limit is enforced by VTS
constexpr uint32_t maxNumCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES);
if (numModelCacheFiles > maxNumCacheFiles || numDataCacheFiles > maxNumCacheFiles) {
LOG(ERROR)
<< "IDevice::getNumberOfCacheFilesNeeded returned invalid number of cache files: "
@@ -684,7 +699,7 @@ static std::optional<InitialData> initializeFunction(Device* device) {
template <typename Core>
std::optional<InitialData> initialize(const Core& core) {
- // version 1.3+ HAL
+ // version 1.3 HAL
if (const auto device = core.template getDevice<V1_3::IDevice>()) {
return initializeFunction(device.get());
}
@@ -710,7 +725,7 @@ std::optional<InitialData> initialize(const Core& core) {
}
std::shared_ptr<VersionedIDevice> VersionedIDevice::create(std::string serviceName,
- const DeviceFactory& makeDevice) {
+ const HalDeviceFactory& makeDevice) {
CHECK(makeDevice != nullptr)
<< "VersionedIDevice::create passed invalid device factory object.";
@@ -736,15 +751,16 @@ std::shared_ptr<VersionedIDevice> VersionedIDevice::create(std::string serviceNa
auto [capabilities, supportedExtensions, type, versionString, numberOfCacheFilesNeeded] =
std::move(*initialData);
return std::make_shared<VersionedIDevice>(
- std::move(capabilities), std::move(supportedExtensions), type, std::move(versionString),
- numberOfCacheFilesNeeded, std::move(serviceName), makeDevice, std::move(core.value()));
+ uncheckedConvert(capabilities), uncheckedConvert(supportedExtensions), type,
+ std::move(versionString), numberOfCacheFilesNeeded, std::move(serviceName), makeDevice,
+ std::move(core.value()));
}
-VersionedIDevice::VersionedIDevice(hal::Capabilities capabilities,
- std::vector<hal::Extension> supportedExtensions, int32_t type,
+VersionedIDevice::VersionedIDevice(Capabilities capabilities,
+ std::vector<Extension> supportedExtensions, int32_t type,
std::string versionString,
std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
- std::string serviceName, const DeviceFactory& makeDevice,
+ std::string serviceName, const HalDeviceFactory& makeDevice,
Core core)
: kCapabilities(std::move(capabilities)),
kSupportedExtensions(std::move(supportedExtensions)),
@@ -765,7 +781,7 @@ std::optional<VersionedIDevice::Core> VersionedIDevice::Core::create(sp<V1_0::ID
// proactively handle service crashes. If the linkToDeath call fails,
// asynchronous calls are susceptible to hangs if the service crashes before
// providing the response.
- const Return<bool> ret = device->linkToDeath(deathHandler, 0);
+ const hardware::Return<bool> ret = device->linkToDeath(deathHandler, 0);
if (!ret.isOk()) {
LOG(ERROR) << "VersionedIDevice::Core::create failed to register a death recipient for the "
"IDevice object because of failure: "
@@ -828,12 +844,13 @@ std::pair<sp<T_IDevice>, sp<IDeviceDeathHandler>> VersionedIDevice::Core::getDev
}
template <typename T_Return, typename T_IDevice, typename T_Callback>
-Return<T_Return> callProtected(const char* context,
- const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
- const sp<T_IDevice>& device, const sp<T_Callback>& callback,
- const sp<IDeviceDeathHandler>& deathHandler) {
+hardware::Return<T_Return> callProtected(
+ const char* context,
+ const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
+ const sp<T_IDevice>& device, const sp<T_Callback>& callback,
+ const sp<IDeviceDeathHandler>& deathHandler) {
const auto scoped = deathHandler->protectCallback(callback);
- Return<T_Return> ret = fn(device);
+ hardware::Return<T_Return> ret = fn(device);
// Suppose there was a transport error. We have the following cases:
// 1. Either not due to a dead device, or due to a device that was
// already dead at the time of the call to protectCallback(). In
@@ -863,16 +880,16 @@ Return<T_Return> callProtected(const char* context,
return ret;
}
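
callProtected() above brackets each HIDL call with a scope guard obtained from the death handler, so a callback left waiting by a crashed service is still unblocked. A shape-only sketch under that reading, with a hypothetical ScopedProtection in place of protectCallback():

    #include <memory>

    struct Callback {
        void notifyServiceDied() { /* unblock any waiter with a failure status */ }
    };

    // Hypothetical RAII guard: registers the callback with the death handler
    // on construction and unregisters it on destruction.
    class ScopedProtection {
      public:
        explicit ScopedProtection(const std::shared_ptr<Callback>& cb) : mCallback(cb) {}
        ~ScopedProtection() { /* unregister from the death handler */ }

      private:
        std::shared_ptr<Callback> mCallback;
    };

    template <typename Fn>
    auto callProtectedSketch(const std::shared_ptr<Callback>& callback, Fn&& fn) {
        const ScopedProtection scoped(callback);  // protected for the call's duration
        return fn();  // transport errors are inspected by the caller, as in the diff
    }
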
template <typename T_Return, typename T_IDevice>
-Return<T_Return> callProtected(const char*,
- const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
- const sp<T_IDevice>& device, const std::nullptr_t&,
- const sp<IDeviceDeathHandler>&) {
+hardware::Return<T_Return> callProtected(
+ const char*, const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
+ const sp<T_IDevice>& device, const std::nullptr_t&, const sp<IDeviceDeathHandler>&) {
return fn(device);
}
template <typename T_Return, typename T_IDevice, typename T_Callback>
-Return<T_Return> VersionedIDevice::recoverable(
- const char* context, const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn,
+hardware::Return<T_Return> VersionedIDevice::recoverable(
+ const char* context,
+ const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
const T_Callback& callback) const EXCLUDES(mMutex) {
CHECK_EQ(callback == nullptr, (std::is_same_v<T_Callback, std::nullptr_t>));
@@ -880,7 +897,7 @@ Return<T_Return> VersionedIDevice::recoverable(
sp<IDeviceDeathHandler> deathHandler;
std::tie(device, deathHandler) = getDeviceAndDeathHandler<T_IDevice>();
- Return<T_Return> ret = callProtected(context, fn, device, callback, deathHandler);
+ hardware::Return<T_Return> ret = callProtected(context, fn, device, callback, deathHandler);
if (ret.isDeadObject()) {
{
@@ -958,42 +975,42 @@ const std::vector<Extension>& VersionedIDevice::getSupportedExtensions() const {
return kSupportedExtensions;
}
-std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
+std::pair<ErrorStatus, std::vector<bool>> VersionedIDevice::getSupportedOperations(
const MetaModel& metaModel) const {
- const std::pair<ErrorStatus, hidl_vec<bool>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
- std::pair<ErrorStatus, hidl_vec<bool>> result;
+ const std::pair<ErrorStatus, std::vector<bool>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
+ std::pair<ErrorStatus, std::vector<bool>> result;
const Model& model = metaModel.getModel();
auto noneSupported = [&model] {
- hidl_vec<bool> supported(model.main.operations.size());
- std::fill(supported.begin(), supported.end(), false);
+ std::vector<bool> supported(model.main.operations.size(), false);
return std::make_pair(ErrorStatus::NONE, std::move(supported));
};
- auto remappedResult = [&model](const std::pair<ErrorStatus, hidl_vec<bool>>& result,
- const std::function<uint32_t(uint32_t)>&
- slicedModelOperationIndexToModelOperationIndex) {
- const ErrorStatus status = result.first;
- const hidl_vec<bool>& supported = result.second;
- hidl_vec<bool> remappedSupported(model.main.operations.size());
- std::fill(remappedSupported.begin(), remappedSupported.end(), false);
- for (size_t i = 0; i < supported.size(); ++i) {
- if (supported[i]) {
- remappedSupported[slicedModelOperationIndexToModelOperationIndex(i)] = true;
- }
- }
- return std::make_pair(status, std::move(remappedSupported));
- };
+ auto remappedResult =
+ [&model](const std::pair<ErrorStatus, std::vector<bool>>& result,
+ const MetaModel::Mapper& slicedModelOperationIndexToModelOperationIndex) {
+ const ErrorStatus status = result.first;
+ const std::vector<bool>& supported = result.second;
+ std::vector<bool> remappedSupported(model.main.operations.size(), false);
+ for (size_t i = 0; i < supported.size(); ++i) {
+ if (supported[i]) {
+ remappedSupported[slicedModelOperationIndexToModelOperationIndex(i)] = true;
+ }
+ }
+ return std::make_pair(status, std::move(remappedSupported));
+ };
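
remappedResult above translates per-operation support bits computed on a sliced (downgraded) model back to the original model's operation indices via MetaModel::Mapper. A small sketch of that remapping, with the mapper written as a plain std::function:

    #include <cstdint>
    #include <functional>
    #include <vector>

    std::vector<bool> remapSupported(
            const std::vector<bool>& slicedSupported, size_t originalOperationCount,
            const std::function<uint32_t(uint32_t)>& toOriginalIndex) {
        std::vector<bool> remapped(originalOperationCount, false);
        for (uint32_t i = 0; i < slicedSupported.size(); ++i) {
            if (slicedSupported[i]) {
                remapped[toOriginalIndex(i)] = true;  // mark the original operation
            }
        }
        return remapped;
    }
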
- // version 1.3+ HAL
+ // version 1.3 HAL
+ const V1_3::Model model13 = convertToV1_3(model);
if (getDevice<V1_3::IDevice>() != nullptr) {
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_3");
- Return<void> ret = recoverable<void, V1_3::IDevice>(
- __FUNCTION__, [&model, &result](const sp<V1_3::IDevice>& device) {
+ hardware::Return<void> ret = recoverable<void, V1_3::IDevice>(
+ __FUNCTION__, [&model13, &result](const sp<V1_3::IDevice>& device) {
return device->getSupportedOperations_1_3(
- model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(error, supported);
+ model13, [&result](V1_3::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1005,11 +1022,11 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
// version 1.2 HAL
if (getDevice<V1_2::IDevice>() != nullptr) {
- const bool compliant = compliantWithV1_2(model);
+ const bool compliant = compliantWithV1_2(model13);
V1_2::Model model12;
- std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex;
+ MetaModel::Mapper slicedModelOperationIndexToModelOperationIndex;
if (compliant) {
- model12 = convertToV1_2(model);
+ model12 = convertToV1_2(model13);
} else {
const auto slice12 = metaModel.getSliceV1_2();
if (!slice12.has_value()) {
@@ -1018,12 +1035,12 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
std::tie(model12, slicedModelOperationIndexToModelOperationIndex) = *slice12;
}
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_2");
- Return<void> ret = recoverable<void, V1_2::IDevice>(
+ hardware::Return<void> ret = recoverable<void, V1_2::IDevice>(
__FUNCTION__, [&model12, &result](const sp<V1_2::IDevice>& device) {
return device->getSupportedOperations_1_2(
- model12,
- [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(convertToV1_3(error), supported);
+ model12, [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1038,11 +1055,11 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
// version 1.1 HAL
if (getDevice<V1_1::IDevice>() != nullptr) {
- const bool compliant = compliantWithV1_1(model);
+ const bool compliant = compliantWithV1_1(model13);
V1_1::Model model11;
- std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex;
+ MetaModel::Mapper slicedModelOperationIndexToModelOperationIndex;
if (compliant) {
- model11 = convertToV1_1(model);
+ model11 = convertToV1_1(model13);
} else {
const auto slice11 = metaModel.getSliceV1_1();
if (!slice11.has_value()) {
@@ -1051,12 +1068,12 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
std::tie(model11, slicedModelOperationIndexToModelOperationIndex) = *slice11;
}
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_1");
- Return<void> ret = recoverable<void, V1_1::IDevice>(
+ hardware::Return<void> ret = recoverable<void, V1_1::IDevice>(
__FUNCTION__, [&model11, &result](const sp<V1_1::IDevice>& device) {
return device->getSupportedOperations_1_1(
- model11,
- [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(convertToV1_3(error), supported);
+ model11, [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1071,11 +1088,11 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
// version 1.0 HAL
if (getDevice<V1_0::IDevice>() != nullptr) {
- const bool compliant = compliantWithV1_0(model);
+ const bool compliant = compliantWithV1_0(model13);
V1_0::Model model10;
- std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex;
+ MetaModel::Mapper slicedModelOperationIndexToModelOperationIndex;
if (compliant) {
- model10 = convertToV1_0(model);
+ model10 = convertToV1_0(model13);
} else {
const auto slice10 = metaModel.getSliceV1_0();
if (!slice10.has_value()) {
@@ -1084,12 +1101,12 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
std::tie(model10, slicedModelOperationIndexToModelOperationIndex) = *slice10;
}
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations");
- Return<void> ret = recoverable<void, V1_0::IDevice>(
+ hardware::Return<void> ret = recoverable<void, V1_0::IDevice>(
__FUNCTION__, [&model10, &result](const sp<V1_0::IDevice>& device) {
return device->getSupportedOperations(
- model10,
- [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) {
- result = std::make_pair(convertToV1_3(error), supported);
+ model10, [&result](V1_0::ErrorStatus error,
+ const hardware::hidl_vec<bool>& supported) {
+ result = std::make_pair(uncheckedConvert(error), supported);
});
});
if (!ret.isOk()) {
@@ -1111,7 +1128,7 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
// handle is expected to come in as empty, and is only set to an fd when the function returns true.
// The file descriptor is always opened with both read and write permission.
static bool createCacheHandle(const std::string& cache, bool createIfNotExist,
- hidl_handle* handle) {
+ hardware::hidl_handle* handle) {
CHECK(handle->getNativeHandle() == nullptr);
int fd = open(cache.c_str(), createIfNotExist ? (O_RDWR | O_CREAT) : O_RDWR, S_IRUSR | S_IWUSR);
NN_RET_CHECK_GE(fd, 0);
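
For reference, a sketch of how the rest of createCacheHandle plausibly proceeds: wrapping the open fd in a native_handle_t and handing ownership to a hardware::hidl_handle. This assumes the standard libcutils/libhidlbase APIs (native_handle_create, hidl_handle::setTo) and is an illustration, not the verbatim continuation of the function:

    #include <cutils/native_handle.h>
    #include <hidl/HidlSupport.h>

    // Wrap `fd` in a one-fd native handle; on success the hidl_handle owns the fd.
    static bool wrapFdInHidlHandle(int fd, android::hardware::hidl_handle* handle) {
        native_handle_t* nativeHandle = native_handle_create(/*numFds=*/1, /*numInts=*/0);
        if (nativeHandle == nullptr) return false;
        nativeHandle->data[0] = fd;
        handle->setTo(nativeHandle, /*shouldOwn=*/true);
        return true;
    }
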
@@ -1127,16 +1144,15 @@ static bool createCacheHandle(const std::string& cache, bool createIfNotExist,
// Opens a list of cache files and returns the handle vector. Returns an empty vector on failure.
// The file descriptors are always opened with both read and write permission.
-static hidl_vec<hidl_handle> createCacheHandleVec(uint32_t numCacheFiles,
- const std::string& baseFileName,
- bool createIfNotExist) {
- CHECK(numCacheFiles <= static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
- hidl_vec<hidl_handle> handles(numCacheFiles);
+static hardware::hidl_vec<hardware::hidl_handle> createCacheHandleVec(
+ uint32_t numCacheFiles, const std::string& baseFileName, bool createIfNotExist) {
+ CHECK(numCacheFiles <= static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES));
+ hardware::hidl_vec<hardware::hidl_handle> handles(numCacheFiles);
for (uint32_t i = 0; i < numCacheFiles; i++) {
std::string filename = baseFileName + std::to_string(i);
VLOG(COMPILATION) << "Cache " << i << ": " << filename;
if (!createCacheHandle(filename, createIfNotExist, &handles[i])) {
- return hidl_vec<hidl_handle>();
+ return hardware::hidl_vec<hardware::hidl_handle>();
}
}
return handles;
@@ -1146,8 +1162,9 @@ static hidl_vec<hidl_handle> createCacheHandleVec(uint32_t numCacheFiles,
// fail and leaves the vectors empty. Each vector is expected to come in as empty.
static bool getCacheHandles(const std::string& cacheDir, const CacheToken& token,
const std::pair<uint32_t, uint32_t>& numCacheFiles,
- bool createIfNotExist, hidl_vec<hidl_handle>* modelCache,
- hidl_vec<hidl_handle>* dataCache) {
+ bool createIfNotExist,
+ hardware::hidl_vec<hardware::hidl_handle>* modelCache,
+ hardware::hidl_vec<hardware::hidl_handle>* dataCache) {
// The filename includes ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2 characters for the token,
// and 1 character for the model/data cache identifier.
std::string filename(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2 + 1, '0');
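
A hypothetical sketch of the filename layout that comment describes: two hex characters per token byte plus one trailing identifier character. The function name and the '0'/'1' identifier characters here are illustrative, not taken from the runtime:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Encode a cache token as lowercase-free hex and append a cache-kind suffix.
    std::string cacheFileBaseName(const std::vector<uint8_t>& token, bool isDataCache) {
        static const char kHex[] = "0123456789ABCDEF";
        std::string name;
        name.reserve(token.size() * 2 + 1);
        for (uint8_t byte : token) {
            name += kHex[byte >> 4];
            name += kHex[byte & 0x0F];
        }
        name += isDataCache ? '1' : '0';  // final character selects model vs. data cache
        return name;
    }
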
@@ -1193,7 +1210,7 @@ static std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelResu
if (status != ErrorStatus::NONE) {
LOG(ERROR) << prepareName << " on " << serviceName << " failed: "
- << "prepareReturnStatus=" << toString(status);
+ << "prepareReturnStatus=" << status;
return prepareModelFailure(status);
}
if (preparedModel == nullptr) {
@@ -1214,7 +1231,7 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
ANEURALNETWORKS_DEAD_OBJECT, nullptr};
// Get cache files if they exist, otherwise create them.
- hidl_vec<hidl_handle> modelCache, dataCache;
+ hardware::hidl_vec<hardware::hidl_handle> modelCache, dataCache;
if (!maybeToken.has_value() ||
!getCacheHandles(cacheDir, *maybeToken, kNumberOfCacheFilesNeeded,
/*createIfNotExist=*/true, &modelCache, &dataCache)) {
@@ -1226,19 +1243,22 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
static const CacheToken kNullToken{};
const CacheToken token = maybeToken.value_or(kNullToken);
+ const V1_3::Model model13 = convertToV1_3(model);
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
// If 1.3 device, try preparing model
if (getDevice<V1_3::IDevice>() != nullptr) {
const auto otp = makeTimePoint(deadline);
- const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>(
- __FUNCTION__,
- [&model, preference, priority, &otp, &modelCache, &dataCache, &token,
- &callback](const sp<V1_3::IDevice>& device) {
- return device->prepareModel_1_3(model, preference, priority, otp, modelCache,
- dataCache, token, callback);
- },
- callback);
+ const hardware::Return<V1_3::ErrorStatus> ret =
+ recoverable<V1_3::ErrorStatus, V1_3::IDevice>(
+ __FUNCTION__,
+ [&model13, preference, priority, &otp, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_3::IDevice>& device) {
+ return device->prepareModel_1_3(
+ model13, convertToV1_1(preference), convertToV1_3(priority),
+ convertToV1_3(otp), modelCache, dataCache, token, callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description();
return kDeadObject;
@@ -1247,9 +1267,10 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description();
return prepareModelFailure();
}
- if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_3 returned " << toString(static_cast<ErrorStatus>(ret));
- return prepareModelFailure(ret);
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_3 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel_1_3", kServiceName);
}
@@ -1264,20 +1285,22 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
// but could be larger for other models).
NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
"VersionedIDevice::prepareModel_1_2");
- compliant = compliantWithV1_2(model);
+ compliant = compliantWithV1_2(model13);
if (compliant) {
- model12 = convertToV1_2(model); // copy is elided
+ model12 = convertToV1_2(model13); // copy is elided
}
}
if (compliant) {
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
- __FUNCTION__,
- [&model12, &preference, &modelCache, &dataCache, &token,
- &callback](const sp<V1_2::IDevice>& device) {
- return device->prepareModel_1_2(model12, preference, modelCache, dataCache,
- token, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
+ __FUNCTION__,
+ [&model12, &preference, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_2::IDevice>& device) {
+ return device->prepareModel_1_2(model12, convertToV1_1(preference),
+ modelCache, dataCache, token,
+ callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
return kDeadObject;
@@ -1286,10 +1309,10 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_2 returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_2 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel_1_2", kServiceName);
}
@@ -1308,18 +1331,20 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
// but could be larger for other models).
NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
"VersionedIDevice::prepareModel_1_1");
- compliant = compliantWithV1_1(model);
+ compliant = compliantWithV1_1(model13);
if (compliant) {
- model11 = convertToV1_1(model); // copy is elided
+ model11 = convertToV1_1(model13); // copy is elided
}
}
if (compliant) {
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_1::IDevice>(
- __FUNCTION__,
- [&model11, &preference, &callback](const sp<V1_1::IDevice>& device) {
- return device->prepareModel_1_1(model11, preference, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_1::IDevice>(
+ __FUNCTION__,
+ [&model11, &preference, &callback](const sp<V1_1::IDevice>& device) {
+ return device->prepareModel_1_1(model11, convertToV1_1(preference),
+ callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description();
return kDeadObject;
@@ -1328,10 +1353,10 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_1 returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_1 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel_1_1", kServiceName);
}
@@ -1350,18 +1375,19 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
// but could be larger for other models).
NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
"VersionedIDevice::prepareModel");
- compliant = compliantWithV1_0(model);
+ compliant = compliantWithV1_0(model13);
if (compliant) {
- model10 = convertToV1_0(model); // copy is elided
+ model10 = convertToV1_0(model13); // copy is elided
}
}
if (compliant) {
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_0::IDevice>(
- __FUNCTION__,
- [&model10, &callback](const sp<V1_0::IDevice>& device) {
- return device->prepareModel(model10, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_0::IDevice>(
+ __FUNCTION__,
+ [&model10, &callback](const sp<V1_0::IDevice>& device) {
+ return device->prepareModel(model10, callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModel failure: " << ret.description();
return kDeadObject;
@@ -1370,10 +1396,10 @@ std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepa
LOG(ERROR) << "prepareModel failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModel", kServiceName);
}
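
Across all four branches above the shape is the same: convert the canonical model to V1_3 once, then, per HAL generation, either downconvert when the model is compliant or fall back to a metamodel slice plus an index mapper. A compilable sketch of that decision with stand-in types (every name below is illustrative, not the runtime's API):

    #include <cstdint>
    #include <functional>
    #include <optional>
    #include <utility>

    struct ModelV13 {};                                // stand-in for V1_3::Model
    struct ModelV12 {};                                // stand-in for V1_2::Model
    using Mapper = std::function<uint32_t(uint32_t)>;  // stand-in for MetaModel::Mapper

    // Stand-ins for compliantWithV1_2, convertToV1_2, and MetaModel::getSliceV1_2.
    bool isCompliantWithV1_2(const ModelV13&) { return true; }
    ModelV12 downconvertToV1_2(const ModelV13&) { return {}; }
    std::optional<std::pair<ModelV12, Mapper>> sliceToV1_2(const ModelV13&) { return std::nullopt; }

    std::optional<std::pair<ModelV12, Mapper>> modelForV1_2Device(const ModelV13& model13) {
        if (isCompliantWithV1_2(model13)) {
            // Lossless downconversion: operation indices map one-to-one.
            return std::make_pair(downconvertToV1_2(model13),
                                  Mapper([](uint32_t i) { return i; }));
        }
        // Otherwise only the slice of V1_2-expressible operations is offered.
        return sliceToV1_2(model13);
    }
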
@@ -1398,24 +1424,25 @@ VersionedIDevice::prepareModelFromCacheInternal(const std::optional<Deadline>& d
ANEURALNETWORKS_DEAD_OBJECT, nullptr};
// Get cache files if they exist, otherwise return from the function early.
- hidl_vec<hidl_handle> modelCache, dataCache;
+ hardware::hidl_vec<hardware::hidl_handle> modelCache, dataCache;
if (!getCacheHandles(cacheDir, token, kNumberOfCacheFilesNeeded,
/*createIfNotExist=*/false, &modelCache, &dataCache)) {
return prepareModelFailure();
}
- // version 1.3+ HAL
+ // version 1.3 HAL
if (getDevice<V1_3::IDevice>() != nullptr) {
const auto otp = makeTimePoint(deadline);
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
- const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>(
- __FUNCTION__,
- [&otp, &modelCache, &dataCache, &token,
- &callback](const sp<V1_3::IDevice>& device) {
- return device->prepareModelFromCache_1_3(otp, modelCache, dataCache, token,
- callback);
- },
- callback);
+ const hardware::Return<V1_3::ErrorStatus> ret =
+ recoverable<V1_3::ErrorStatus, V1_3::IDevice>(
+ __FUNCTION__,
+ [&otp, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_3::IDevice>& device) {
+ return device->prepareModelFromCache_1_3(convertToV1_3(otp), modelCache,
+ dataCache, token, callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModelFromCache_1_3 failure: " << ret.description();
return kDeadObject;
@@ -1424,10 +1451,10 @@ VersionedIDevice::prepareModelFromCacheInternal(const std::optional<Deadline>& d
LOG(ERROR) << "prepareModelFromCache_1_3 failure: " << ret.description();
return prepareModelFailure();
}
- if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModelFromCache_1_3 returned "
- << toString(static_cast<ErrorStatus>(ret));
- return prepareModelFailure(ret);
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModelFromCache_1_3 returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModelFromCache_1_3", kServiceName);
}
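
The deadline plumbing above (makeTimePoint followed by convertToV1_3(otp)) turns an optional steady-clock deadline into a HAL time point. A sketch of that conversion with a stand-in for V1_3::OptionalTimePoint, which in reality is a HIDL safe_union rather than the plain struct used here:

    #include <chrono>
    #include <cstdint>
    #include <optional>

    struct OptionalTimePoint {          // stand-in for V1_3::OptionalTimePoint
        bool hasValue = false;
        uint64_t nanosecondsSinceEpoch = 0;
    };

    OptionalTimePoint makeTimePointSketch(
            const std::optional<std::chrono::steady_clock::time_point>& deadline) {
        OptionalTimePoint otp;
        if (deadline.has_value()) {
            otp.hasValue = true;
            otp.nanosecondsSinceEpoch = static_cast<uint64_t>(
                    std::chrono::duration_cast<std::chrono::nanoseconds>(
                            deadline->time_since_epoch()).count());
        }
        return otp;
    }
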
@@ -1435,12 +1462,15 @@ VersionedIDevice::prepareModelFromCacheInternal(const std::optional<Deadline>& d
// version 1.2 HAL
if (getDevice<V1_2::IDevice>() != nullptr) {
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
- const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
- __FUNCTION__,
- [&modelCache, &dataCache, &token, &callback](const sp<V1_2::IDevice>& device) {
- return device->prepareModelFromCache(modelCache, dataCache, token, callback);
- },
- callback);
+ const hardware::Return<V1_0::ErrorStatus> ret =
+ recoverable<V1_0::ErrorStatus, V1_2::IDevice>(
+ __FUNCTION__,
+ [&modelCache, &dataCache, &token,
+ &callback](const sp<V1_2::IDevice>& device) {
+ return device->prepareModelFromCache(modelCache, dataCache, token,
+ callback);
+ },
+ callback);
if (ret.isDeadObject()) {
LOG(ERROR) << "prepareModelFromCache failure: " << ret.description();
return kDeadObject;
@@ -1449,10 +1479,10 @@ VersionedIDevice::prepareModelFromCacheInternal(const std::optional<Deadline>& d
LOG(ERROR) << "prepareModelFromCache failure: " << ret.description();
return prepareModelFailure();
}
- const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret);
- if (status != V1_0::ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModelFromCache returned " << toString(status);
- return prepareModelFailure(convertToV1_3(status));
+ const ErrorStatus status = uncheckedConvert(ret);
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModelFromCache returned " << status;
+ return prepareModelFailure(status);
}
return prepareModelResult(*callback, "prepareModelFromCache", kServiceName);
}
@@ -1520,28 +1550,31 @@ const std::string& VersionedIDevice::getName() const {
return kServiceName;
}
-std::tuple<ErrorStatus, sp<IBuffer>, uint32_t> VersionedIDevice::allocate(
- const BufferDesc& desc,
+std::tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, uint32_t> VersionedIDevice::allocate(
+ const V1_3::BufferDesc& desc,
const std::vector<std::shared_ptr<VersionedIPreparedModel>>& versionedPreparedModels,
- const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles) const {
- const auto kFailure = std::make_tuple<ErrorStatus, sp<IBuffer>, uint32_t>(
- ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles) const {
+ const auto kFailure = std::make_tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, uint32_t>(
+ V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- // version 1.3+ HAL
+ // version 1.3 HAL
if (getDevice<V1_3::IDevice>() != nullptr) {
- hidl_vec<sp<V1_3::IPreparedModel>> preparedModels(versionedPreparedModels.size());
+ hardware::hidl_vec<sp<V1_3::IPreparedModel>> preparedModels(versionedPreparedModels.size());
std::transform(versionedPreparedModels.begin(), versionedPreparedModels.end(),
preparedModels.begin(),
[](const auto& preparedModel) { return preparedModel->getV1_3(); });
- std::tuple<ErrorStatus, sp<IBuffer>, int32_t> result;
- const Return<void> ret = recoverable<void, V1_3::IDevice>(
+ std::tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, int32_t> result;
+ const hardware::Return<void> ret = recoverable<void, V1_3::IDevice>(
__FUNCTION__, [&](const sp<V1_3::IDevice>& device) {
- return device->allocate(desc, preparedModels, inputRoles, outputRoles,
- [&result](ErrorStatus error, const sp<IBuffer>& buffer,
- uint32_t token) {
- result = {error, buffer, token};
- });
+ return device->allocate(
+ desc, preparedModels, convertToV1_3(inputRoles),
+ convertToV1_3(outputRoles),
+ [&result](V1_3::ErrorStatus error, const sp<V1_3::IBuffer>& buffer,
+ uint32_t token) {
+ result = {error, buffer, token};
+ });
});
if (!ret.isOk()) {
LOG(ERROR) << "allocate failure: " << ret.description();
diff --git a/nn/runtime/VersionedInterfaces.h b/nn/runtime/VersionedInterfaces.h
index 1b8433e1c..d41dcd3ad 100644
--- a/nn/runtime/VersionedInterfaces.h
+++ b/nn/runtime/VersionedInterfaces.h
@@ -43,6 +43,8 @@ class IPreparedModelDeathHandler;
class MetaModel;
class VersionedIPreparedModel;
+using ModelFactory = std::function<Model()>;
+
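
The new ModelFactory alias makes model construction lazy: callers hand prepareModel a thunk, and the canonical Model is only materialized on the path that actually needs it (for example, it can be skipped entirely when a cached prepared model is used). A minimal sketch with a stand-in Model type:

    #include <functional>

    struct Model {};                             // stand-in for the canonical Model
    using ModelFactory = std::function<Model()>;

    Model prepareSketch(const ModelFactory& makeModel, bool cacheHit) {
        if (cacheHit) {
            return Model{};                      // the factory is never invoked here
        }
        return makeModel();                      // constructed only when needed
    }
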
/**
* Each class (VersionedIDevice, VersionedIPreparedModel) wraps a HIDL interface
* of any version to abstract away version differences. It allows the remainder
@@ -77,7 +79,7 @@ class VersionedIDevice {
* @return A valid VersionedIDevice object, otherwise nullptr.
*/
static std::shared_ptr<VersionedIDevice> create(std::string serviceName,
- const hal::DeviceFactory& makeDevice);
+ const HalDeviceFactory& makeDevice);
/**
* Constructor for the VersionedIDevice object.
@@ -98,18 +100,17 @@ class VersionedIDevice {
* newer interfaces, and a hidl_death_recipient that will proactively handle
* the case when the service containing the IDevice object crashes.
*/
- VersionedIDevice(hal::Capabilities capabilities,
- std::vector<hal::Extension> supportedExtensions, int32_t type,
- std::string versionString,
+ VersionedIDevice(Capabilities capabilities, std::vector<Extension> supportedExtensions,
+ int32_t type, std::string versionString,
std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
- std::string serviceName, const hal::DeviceFactory& makeDevice, Core core);
+ std::string serviceName, const HalDeviceFactory& makeDevice, Core core);
/**
* Gets the capabilities of a driver.
*
* @return capabilities Capabilities of the driver.
*/
- const hal::Capabilities& getCapabilities() const;
+ const Capabilities& getCapabilities() const;
/**
* Gets information about extensions supported by the driver implementation.
@@ -122,7 +123,7 @@ class VersionedIDevice {
*
* @return extensions A list of supported extensions.
*/
- const std::vector<hal::Extension>& getSupportedExtensions() const;
+ const std::vector<Extension>& getSupportedExtensions() const;
/**
* Gets the supported operations in a MetaModel.
@@ -152,7 +153,7 @@ class VersionedIDevice {
* corresponds with the index of the operation
* it is describing.
*/
- std::pair<hal::ErrorStatus, hal::hidl_vec<bool>> getSupportedOperations(
+ std::pair<ErrorStatus, std::vector<bool>> getSupportedOperations(
const MetaModel& metaModel) const;
/**
@@ -220,9 +221,9 @@ class VersionedIDevice {
* that has been prepared for execution, else nullptr.
*/
std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModel(
- const hal::ModelFactory& makeModel, hal::ExecutionPreference preference, hal::Priority,
+ const ModelFactory& makeModel, ExecutionPreference preference, Priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
- const std::optional<hal::CacheToken>& maybeToken) const;
+ const std::optional<CacheToken>& maybeToken) const;
/**
* Returns the feature level of a driver.
@@ -366,11 +367,11 @@ class VersionedIDevice {
* execution. If the buffer was unable to be allocated due to an error, the token must be
* 0.
*/
- std::tuple<hal::ErrorStatus, sp<hal::IBuffer>, uint32_t> allocate(
- const hal::BufferDesc& desc,
+ std::tuple<V1_3::ErrorStatus, sp<V1_3::IBuffer>, uint32_t> allocate(
+ const V1_3::BufferDesc& desc,
const std::vector<std::shared_ptr<VersionedIPreparedModel>>& preparedModels,
- const hal::hidl_vec<hal::BufferRole>& inputRoles,
- const hal::hidl_vec<hal::BufferRole>& outputRoles) const;
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles) const;
/**
* Blocks until the device is not in a bad state.
@@ -382,20 +383,20 @@ class VersionedIDevice {
private:
// Cached initialization results.
- const hal::Capabilities kCapabilities;
- const std::vector<hal::Extension> kSupportedExtensions;
+ const Capabilities kCapabilities;
+ const std::vector<Extension> kSupportedExtensions;
const int32_t kType;
const std::string kVersionString;
const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
// internal methods to prepare a model
std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelInternal(
- const hal::Model& model, hal::ExecutionPreference preference, hal::Priority priority,
+ const Model& model, ExecutionPreference preference, Priority priority,
const std::optional<Deadline>& deadline, const std::string& cacheDir,
- const std::optional<hal::CacheToken>& maybeToken) const;
+ const std::optional<CacheToken>& maybeToken) const;
std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelFromCacheInternal(
const std::optional<Deadline>& deadline, const std::string& cacheDir,
- const hal::CacheToken& token) const;
+ const CacheToken& token) const;
/**
* This is a utility class for VersionedIDevice that encapsulates a
@@ -426,7 +427,7 @@ class VersionedIDevice {
* the case when the service containing the IDevice
* object crashes.
*/
- Core(sp<hal::V1_0::IDevice> device, sp<IDeviceDeathHandler> deathHandler);
+ Core(sp<V1_0::IDevice> device, sp<IDeviceDeathHandler> deathHandler);
/**
* Destructor for the Core object.
@@ -456,7 +457,7 @@ class VersionedIDevice {
* interface.
* @return A valid Core object, otherwise nullopt.
*/
- static std::optional<Core> create(sp<hal::V1_0::IDevice> device);
+ static std::optional<Core> create(sp<V1_0::IDevice> device);
/**
* Returns sp<*::IDevice> that is a downcast of the sp<V1_0::IDevice>
@@ -466,19 +467,19 @@ class VersionedIDevice {
template <typename T_IDevice>
sp<T_IDevice> getDevice() const;
template <>
- sp<hal::V1_0::IDevice> getDevice() const {
+ sp<V1_0::IDevice> getDevice() const {
return mDeviceV1_0;
}
template <>
- sp<hal::V1_1::IDevice> getDevice() const {
+ sp<V1_1::IDevice> getDevice() const {
return mDeviceV1_1;
}
template <>
- sp<hal::V1_2::IDevice> getDevice() const {
+ sp<V1_2::IDevice> getDevice() const {
return mDeviceV1_2;
}
template <>
- sp<hal::V1_3::IDevice> getDevice() const {
+ sp<V1_3::IDevice> getDevice() const {
return mDeviceV1_3;
}
@@ -511,10 +512,10 @@ class VersionedIDevice {
* Idiomatic usage: if mDeviceV1_1 is non-null, do V1_1 dispatch; otherwise,
* do V1_0 dispatch.
*/
- sp<hal::V1_0::IDevice> mDeviceV1_0;
- sp<hal::V1_1::IDevice> mDeviceV1_1;
- sp<hal::V1_2::IDevice> mDeviceV1_2;
- sp<hal::V1_3::IDevice> mDeviceV1_3;
+ sp<V1_0::IDevice> mDeviceV1_0;
+ sp<V1_1::IDevice> mDeviceV1_1;
+ sp<V1_2::IDevice> mDeviceV1_2;
+ sp<V1_3::IDevice> mDeviceV1_3;
/**
* HIDL callback to be invoked if the service for mDeviceV1_0 crashes.
@@ -548,16 +549,16 @@ class VersionedIDevice {
// If a callback is provided, this method protects it against driver death
// and waits for it (callback->wait()).
template <typename T_Return, typename T_IDevice, typename T_Callback = std::nullptr_t>
- hal::Return<T_Return> recoverable(
+ hardware::Return<T_Return> recoverable(
const char* context,
- const std::function<hal::Return<T_Return>(const sp<T_IDevice>&)>& fn,
+ const std::function<hardware::Return<T_Return>(const sp<T_IDevice>&)>& fn,
const T_Callback& callback = nullptr) const EXCLUDES(mMutex);
// The name of the service that implements the driver.
const std::string kServiceName;
// Factory function object to generate an IDevice object.
- const hal::DeviceFactory kMakeDevice;
+ const HalDeviceFactory kMakeDevice;
// Guards access to mCore.
mutable std::shared_mutex mMutex;
@@ -591,7 +592,7 @@ class VersionedIPreparedModel {
* the case when the service containing the IDevice
* object crashes.
*/
- VersionedIPreparedModel(sp<hal::V1_0::IPreparedModel> preparedModel,
+ VersionedIPreparedModel(sp<V1_0::IPreparedModel> preparedModel,
sp<IPreparedModelDeathHandler> deathHandler);
/**
@@ -676,10 +677,9 @@ class VersionedIPreparedModel {
* UINT64_MAX. A driver may choose to report any time as UINT64_MAX,
* indicating that measurement is not available.
*/
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> execute(
- const hal::Request& request, hal::MeasureTiming measure,
- const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const;
+ std::tuple<int, std::vector<OutputShape>, Timing> execute(
+ const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const;
/**
* Creates a burst controller on a prepared model.
@@ -763,30 +763,28 @@ class VersionedIPreparedModel {
* sync execution. Either IFencedExecutionCallback will be
* returned or optional timing information is returned
*/
- std::tuple<int, hal::hidl_handle, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced(
- const hal::Request& request, const hal::hidl_vec<hal::hidl_handle>& waitFor,
- hal::MeasureTiming measure, const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration,
- const hal::OptionalTimeoutDuration& timeoutDurationAfterFence);
+ std::tuple<int, hardware::hidl_handle, sp<V1_3::IFencedExecutionCallback>, Timing>
+ executeFenced(const Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence);
private:
friend class VersionedIDevice;
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> executeAsynchronously(
- const hal::Request& request, hal::MeasureTiming timing,
- const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration) const;
- std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> executeSynchronously(
- const hal::Request& request, hal::MeasureTiming measure,
- const std::optional<Deadline>& deadline,
- const hal::OptionalTimeoutDuration& loopTimeoutDuration) const;
+ std::tuple<int, std::vector<OutputShape>, Timing> executeAsynchronously(
+ const Request& request, MeasureTiming timing, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration) const;
+ std::tuple<int, std::vector<OutputShape>, Timing> executeSynchronously(
+ const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration) const;
/**
* Returns sp<V1_3::IPreparedModel> that is a downcast of the sp<V1_0::IPreparedModel>
* passed to the constructor. This will be nullptr if that IPreparedModel is
* not actually of the specified downcast type.
*/
- sp<hal::V1_3::IPreparedModel> getV1_3() const { return mPreparedModelV1_3; }
+ sp<V1_3::IPreparedModel> getV1_3() const { return mPreparedModelV1_3; }
/**
* All versions of IPreparedModel are necessary because the preparedModel could be v1.0,
@@ -810,9 +808,9 @@ class VersionedIPreparedModel {
* otherwise, if mPreparedModelV1_2 is non-null, do V1_2 dispatch;
* otherwise, do V1_0 dispatch.
*/
- sp<hal::V1_0::IPreparedModel> mPreparedModelV1_0;
- sp<hal::V1_2::IPreparedModel> mPreparedModelV1_2;
- sp<hal::V1_3::IPreparedModel> mPreparedModelV1_3;
+ sp<V1_0::IPreparedModel> mPreparedModelV1_0;
+ sp<V1_2::IPreparedModel> mPreparedModelV1_2;
+ sp<V1_3::IPreparedModel> mPreparedModelV1_3;
/**
* HIDL callback to be invoked if the service for mPreparedModelV1_0 crashes.
diff --git a/nn/runtime/test/Android.bp b/nn/runtime/test/Android.bp
index 4ea388a1f..ac53b9a10 100644
--- a/nn/runtime/test/Android.bp
+++ b/nn/runtime/test/Android.bp
@@ -134,8 +134,6 @@ cc_defaults {
"fibonacci_extension/FibonacciExtensionTest.cpp",
"TestMain.cpp",
-
- "Bridge.cpp",
],
static_libs: [
"android.hardware.neuralnetworks@1.0-adapter-helper",
diff --git a/nn/runtime/test/Bridge.cpp b/nn/runtime/test/Bridge.cpp
deleted file mode 100644
index 574025620..000000000
--- a/nn/runtime/test/Bridge.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// There are name clashes between NeuralNetworksWrapper.h and
-// HalInterfaces.h. Many tests include the former; many internal
-// header files (nn/runtime/*.h) include the latter. This file
-// contains a few utilities for tests to call that trampoline to the
-// internal headers.
-
-#include "GraphDump.h"
-#include "ModelBuilder.h"
-
-namespace android {
-namespace nn {
-namespace bridge_tests {
-
-void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream) {
- ::android::nn::graphDump(name, model->makeHidlModel(), outStream);
-}
-
-} // namespace bridge_tests
-} // namespace nn
-} // namespace android
diff --git a/nn/runtime/test/Bridge.h b/nn/runtime/test/Bridge.h
deleted file mode 100644
index f067df0f3..000000000
--- a/nn/runtime/test/Bridge.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// There are name clashes between NeuralNetworksWrapper.h and
-// HalInterfaces.h. Many tests include the former; many internal
-// header files (nn/runtime/*.h) include the latter. This file
-// contains a few utilities for tests to call that trampoline to the
-// internal headers.
-
-#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
-#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
-
-#include <iostream>
-
-namespace android {
-namespace nn {
-
-class ModelBuilder;
-
-namespace bridge_tests {
-
-void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream = &std::cout);
-
-} // namespace bridge_tests
-
-} // namespace nn
-} // namespace android
-
-#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
diff --git a/nn/runtime/test/TestCompilationCaching.cpp b/nn/runtime/test/TestCompilationCaching.cpp
index 2311685d5..1a1cdc6c2 100644
--- a/nn/runtime/test/TestCompilationCaching.cpp
+++ b/nn/runtime/test/TestCompilationCaching.cpp
@@ -31,16 +31,17 @@
#include "TestNeuralNetworksWrapper.h"
using namespace android::nn;
-using namespace hal;
-using Result = test_wrapper::Result;
+namespace hardware = android::hardware;
+using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
-const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
template <typename T>
using MQDescriptorSync = ::android::hardware::MQDescriptorSync<T>;
+using android::sp;
namespace android::hardware::neuralnetworks::V1_0 {
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+::std::ostream& operator<<(::std::ostream& os, V1_3::ErrorStatus errorStatus) {
return os << toString(errorStatus);
}
@@ -66,10 +67,10 @@ std::ostream& operator<<(std::ostream& os, HasCalledPrepareModel hasCalledPrepar
}
// Whether the driver is expected to be registered because it can pass initialization.
-bool canDeviceBeRegistered(ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
+bool canDeviceBeRegistered(V1_3::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
constexpr uint32_t maxNumCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
- return error == ErrorStatus::NONE && numModelCache <= maxNumCacheFiles &&
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES);
+ return error == V1_3::ErrorStatus::NONE && numModelCache <= maxNumCacheFiles &&
numDataCache <= maxNumCacheFiles;
}
@@ -94,55 +95,59 @@ class CachingDriver : public sample_driver::SampleDriver {
private:
static constexpr size_t kCacheSize = 256;
- class CachingPreparedModel : public IPreparedModel {
+ class CachingPreparedModel : public V1_3::IPreparedModel {
public:
CachingPreparedModel() = default;
- Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
- const sp<V1_0::IExecutionCallback>&) override {
+ hardware::Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+ const sp<V1_0::IExecutionCallback>&) override {
return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
- const sp<V1_2::IExecutionCallback>&) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request&, V1_2::MeasureTiming,
+ const sp<V1_2::IExecutionCallback>&) override {
return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
- const OptionalTimePoint&,
- const OptionalTimeoutDuration&,
- const sp<V1_3::IExecutionCallback>&) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request&, V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const sp<V1_3::IExecutionCallback>&) override {
return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming,
+ executeSynchronously_cb cb) override {
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
- Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming,
- const OptionalTimePoint&,
- const OptionalTimeoutDuration&,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeSynchronously_1_3_cb cb) override {
cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
- Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
- const MQDescriptorSync<V1_2::FmqRequestDatum>&,
- const MQDescriptorSync<V1_2::FmqResultDatum>&,
- configureExecutionBurst_cb cb) override {
+ hardware::Return<void> configureExecutionBurst(
+ const sp<V1_2::IBurstCallback>&, const MQDescriptorSync<V1_2::FmqRequestDatum>&,
+ const MQDescriptorSync<V1_2::FmqResultDatum>&,
+ configureExecutionBurst_cb cb) override {
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
- return Void();
+ return hardware::Void();
}
- Return<void> executeFenced(const hal::Request&, const hidl_vec<hidl_handle>&, MeasureTiming,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const OptionalTimeoutDuration&, executeFenced_cb cb) {
- cb(ErrorStatus::DEVICE_UNAVAILABLE, hidl_handle(nullptr), nullptr);
- return Void();
+ hardware::Return<void> executeFenced(const V1_3::Request&,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeFenced_cb cb) {
+ cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
};
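
CachingPreparedModel is a pure test double: every execution entry point fails fast with DEVICE_UNAVAILABLE so the tests exercise only the compilation-caching logic, never execution. The same pattern in miniature, with a stand-in interface:

    enum class Status { NONE, DEVICE_UNAVAILABLE };

    struct IPreparedModelStub {                  // stand-in for the HAL interface
        virtual ~IPreparedModelStub() = default;
        virtual Status execute() = 0;
    };

    // Every entry point reports unavailability, isolating the code under test.
    struct AlwaysUnavailable final : IPreparedModelStub {
        Status execute() override { return Status::DEVICE_UNAVAILABLE; }
    };
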
public:
- CachingDriver(std::string_view name, ErrorStatus errorStatusGetNumCacheFiles,
+ CachingDriver(std::string_view name, V1_3::ErrorStatus errorStatusGetNumCacheFiles,
uint32_t numModelCache, uint32_t numDataCache,
- ErrorStatus errorStatusPrepareFromCache)
+ V1_3::ErrorStatus errorStatusPrepareFromCache)
: SampleDriver(name.data()),
mErrorStatusGetNumCacheFiles(errorStatusGetNumCacheFiles),
mNumModelCache(numModelCache),
@@ -156,39 +161,40 @@ class CachingDriver : public sample_driver::SampleDriver {
~CachingDriver() override {}
// Reports performance as faster than the CPU.
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
// Reports supporting all operations.
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
// Reports according to mGetNumCacheFiles.
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
cb(convertToV1_0(mErrorStatusGetNumCacheFiles), mNumModelCache, mNumDataCache);
- return Void();
+ return hardware::Void();
}
// Generates CachingPreparedModel.
// Writes the cache entry per mCacheXData and sets mHasCalledPrepareModel.
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const Model&, ExecutionPreference, Priority, const OptionalTimePoint&,
- const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model&, V1_1::ExecutionPreference, V1_3::Priority,
+ const V1_3::OptionalTimePoint&,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& cb) override {
checkNumberOfCacheHandles(modelCacheHandle.size(), dataCacheHandle.size());
if (modelCacheHandle.size() != 0 || dataCacheHandle.size() != 0) {
@@ -204,9 +210,10 @@ class CachingDriver : public sample_driver::SampleDriver {
// Checks if the cache entry is correct, notifies error status according to
// mErrorStatusPrepareFromCache, sets mHasCalledPrepareModelFromCache.
- Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
- const OptionalTimePoint&, const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
+ const V1_3::OptionalTimePoint&,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) override {
readFromCache(modelCacheHandle, mModelCacheData);
readFromCache(dataCacheHandle, mDataCacheData);
@@ -236,7 +243,8 @@ class CachingDriver : public sample_driver::SampleDriver {
}
}
- void writeToCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& cache) {
+ void writeToCache(const hardware::hidl_vec<hardware::hidl_handle>& handles,
+ const std::vector<uint8_t>& cache) {
for (uint32_t i = 0; i < handles.size(); ++i) {
ASSERT_EQ(handles[i]->numFds, 1);
EXPECT_EQ(write(handles[i]->data[0], cache.data(), kCacheSize),
@@ -244,7 +252,8 @@ class CachingDriver : public sample_driver::SampleDriver {
}
}
- void readFromCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& expected) {
+ void readFromCache(const hardware::hidl_vec<hardware::hidl_handle>& handles,
+ const std::vector<uint8_t>& expected) {
for (uint32_t i = 0; i < handles.size(); ++i) {
ASSERT_EQ(handles[i]->numFds, 1);
std::vector<uint8_t> actual(kCacheSize);
@@ -257,10 +266,10 @@ class CachingDriver : public sample_driver::SampleDriver {
std::vector<uint8_t> mModelCacheData;
std::vector<uint8_t> mDataCacheData;
- const ErrorStatus mErrorStatusGetNumCacheFiles;
+ const V1_3::ErrorStatus mErrorStatusGetNumCacheFiles;
const uint32_t mNumModelCache;
const uint32_t mNumDataCache;
- const ErrorStatus mErrorStatusPrepareFromCache;
+ const V1_3::ErrorStatus mErrorStatusPrepareFromCache;
bool mHasCalledPrepareModelFromCache = false;
HasCalledPrepareModel mHasCalledPrepareModel = HasCalledPrepareModel::NO;
@@ -279,7 +288,7 @@ void CreateBroadcastAddModel(test_wrapper::Model* model) {
model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c});
model->identifyInputsAndOutputs({a, b}, {c});
ASSERT_TRUE(model->isValid());
- ASSERT_EQ(model->finish(), Result::NO_ERROR);
+ ASSERT_EQ(model->finish(), WrapperResult::NO_ERROR);
}
void getDeviceWithName(std::string_view deviceName, const ANeuralNetworksDevice** outputDevice) {
@@ -307,17 +316,17 @@ void getDeviceWithName(std::string_view deviceName, const ANeuralNetworksDevice*
// - ErrorStatus returning from getNumberOfCacheFilesNeeded
// - Number of model cache files returning from getNumberOfCacheFilesNeeded
// - Number of data cache files returning from getNumberOfCacheFilesNeeded
-using DeviceRegistrationTestParam = std::tuple<ErrorStatus, uint32_t, uint32_t>;
+using DeviceRegistrationTestParam = std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t>;
class DeviceRegistrationTest : public ::testing::TestWithParam<DeviceRegistrationTestParam> {
protected:
static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching";
- const ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam());
+ const V1_3::ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam());
const uint32_t kNumModelCache = std::get<1>(GetParam());
const uint32_t kNumDataCache = std::get<2>(GetParam());
const sp<CachingDriver> kDriver =
new CachingDriver(kDeviceName, kErrorStatusGetNumCacheFiles, kNumModelCache,
- kNumDataCache, ErrorStatus::NONE);
+ kNumDataCache, V1_3::ErrorStatus::NONE);
};
TEST_P(DeviceRegistrationTest, CachingFailure) {
@@ -344,7 +353,7 @@ TEST_P(DeviceRegistrationTest, CachingFailure) {
// - Number of model cache files returning from getNumberOfCacheFilesNeeded
// - Number of data cache files returning from getNumberOfCacheFilesNeeded
// - ErrorStatus returning from prepareModelFromCache_1_3
-using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, ErrorStatus>;
+using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, V1_3::ErrorStatus>;
class CompilationCachingTest : public ::testing::TestWithParam<CompilationCachingTestParam> {
protected:
@@ -390,27 +399,29 @@ class CompilationCachingTest : public ::testing::TestWithParam<CompilationCachin
}
void createCache() {
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, ErrorStatus::NONE);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache,
+ kNumDataCache, V1_3::ErrorStatus::NONE);
compileModel(driver, /*withToken=*/true);
}
static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching";
const uint32_t kNumModelCache = std::get<0>(GetParam());
const uint32_t kNumDataCache = std::get<1>(GetParam());
- const ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam());
+ const V1_3::ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam());
const bool kIsCachingSupported = isCachingSupported(kNumModelCache, kNumDataCache);
test_wrapper::Model mModel;
std::string mCacheDir;
- const CacheToken kToken{};
+ const HalCacheToken kToken{};
};
TEST_P(CompilationCachingTest, TokenProvidedAndCacheNotExist) {
if (DeviceManager::get()->getUseCpuOnly()) {
return;
}
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/true);
// When the cache files do not exist, the runtime should never call prepareModelFromCache_1_3.
@@ -427,8 +438,9 @@ TEST_P(CompilationCachingTest, TokenProvidedAndCacheExist) {
return;
}
createCache();
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/true);
// When cache files exist, the runtime should call prepareModelFromCache_1_3 iff caching
@@ -437,7 +449,7 @@ TEST_P(CompilationCachingTest, TokenProvidedAndCacheExist) {
HasCalledPrepareModel expectHasCalledPrepareModel;
if (kIsCachingSupported) {
- if (kErrorStatusPrepareFromCache == ErrorStatus::NONE) {
+ if (kErrorStatusPrepareFromCache == V1_3::ErrorStatus::NONE) {
// The runtime should not call prepareModel_1_3 iff caching supported and
// prepareModelFromCache_1_3 succeeds.
expectHasCalledPrepareModel = HasCalledPrepareModel::NO;
@@ -457,8 +469,9 @@ TEST_P(CompilationCachingTest, TokenNotProvided) {
if (DeviceManager::get()->getUseCpuOnly()) {
return;
}
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/false);
// When no NDK token is provided by the client, the runtime should never call
@@ -468,15 +481,15 @@ TEST_P(CompilationCachingTest, TokenNotProvided) {
}
static const auto kErrorStatusGetNumCacheFilesChoices =
- testing::Values(ErrorStatus::NONE, ErrorStatus::DEVICE_UNAVAILABLE);
+ testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::DEVICE_UNAVAILABLE);
static const auto kNumCacheChoices =
- testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES),
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES) + 1);
+ testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES),
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES) + 1);
static const auto kNumValidCacheChoices =
- testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES));
static const auto kErrorStatusPrepareFromCacheChoices =
- testing::Values(ErrorStatus::NONE, ErrorStatus::GENERAL_FAILURE,
- ErrorStatus::DEVICE_UNAVAILABLE, ErrorStatus::INVALID_ARGUMENT);
+ testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE,
+ V1_3::ErrorStatus::DEVICE_UNAVAILABLE, V1_3::ErrorStatus::INVALID_ARGUMENT);
INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, DeviceRegistrationTest,
testing::Combine(kErrorStatusGetNumCacheFilesChoices, kNumCacheChoices,
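
The parameter grids above combine several independent axes per test suite. A self-contained gtest sketch of the same testing::Combine pattern, reduced to two integer axes:

    #include <gtest/gtest.h>
    #include <tuple>

    using Param = std::tuple<int, int>;
    class SketchTest : public ::testing::TestWithParam<Param> {};

    TEST_P(SketchTest, SumIsNonNegative) {
        const auto [a, b] = GetParam();  // one tuple per axis combination
        EXPECT_GE(a + b, 0);
    }

    // Instantiates the test once per element of the cross product {0,1} x {0,2}.
    INSTANTIATE_TEST_SUITE_P(AllCombos, SketchTest,
                             testing::Combine(testing::Values(0, 1),
                                              testing::Values(0, 2)));
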
diff --git a/nn/runtime/test/TestCompliance.cpp b/nn/runtime/test/TestCompliance.cpp
index d756c2414..299eebcf7 100644
--- a/nn/runtime/test/TestCompliance.cpp
+++ b/nn/runtime/test/TestCompliance.cpp
@@ -27,7 +27,6 @@
namespace android::nn::compliance_test {
-using namespace hal;
using namespace test_helper;
using HidlModel = V1_3::Model;
using WrapperModel = test_wrapper::Model;
@@ -42,7 +41,7 @@ static HidlModel createHidlModel(const WrapperModel& wrapperModel) {
auto modelBuilder = reinterpret_cast<const ModelBuilder*>(wrapperModel.getHandle());
EXPECT_TRUE(modelBuilder->isFinished());
EXPECT_TRUE(modelBuilder->isValid());
- return modelBuilder->makeHidlModel();
+ return convertToV1_3(modelBuilder->makeModel());
}
static void testAvailableSinceV1_3(const WrapperModel& wrapperModel) {
@@ -73,12 +72,12 @@ static void testAvailableSinceV1_0(const WrapperModel& wrapperModel) {
ASSERT_TRUE(compliantWithV1_0(hidlModel));
}
-static void testAvailableSinceV1_2(const Request& request) {
+static void testAvailableSinceV1_2(const V1_3::Request& request) {
ASSERT_FALSE(compliantWithV1_0(request));
ASSERT_TRUE(compliantWithV1_2(request));
}
-static void testAvailableSinceV1_3(const Request& request) {
+static void testAvailableSinceV1_3(const V1_3::Request& request) {
ASSERT_FALSE(compliantWithV1_0(request));
ASSERT_FALSE(compliantWithV1_2(request));
}
@@ -172,20 +171,20 @@ TEST_F(ComplianceTest, HardwareBufferModel) {
TEST_F(ComplianceTest, HardwareBufferRequest) {
const auto [n, ahwb] = MemoryRuntimeAHWB::create(1024);
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
+ V1_3::Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
ASSERT_TRUE(ahwbMemoryPool.hidlMemory().valid());
// AHardwareBuffer as input.
- testAvailableSinceV1_2(Request{
+ testAvailableSinceV1_2(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {ahwbMemoryPool, sharedMemoryPool},
});
// AHardwareBuffer as output.
- testAvailableSinceV1_2(Request{
+ testAvailableSinceV1_2(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {sharedMemoryPool, ahwbMemoryPool},
@@ -194,20 +193,20 @@ TEST_F(ComplianceTest, HardwareBufferRequest) {
#endif
TEST_F(ComplianceTest, DeviceMemory) {
- Request::MemoryPool sharedMemoryPool, deviceMemoryPool;
+ V1_3::Request::MemoryPool sharedMemoryPool, deviceMemoryPool;
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
deviceMemoryPool.token(1);
// Device memory as input.
- testAvailableSinceV1_3(Request{
+ testAvailableSinceV1_3(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {deviceMemoryPool, sharedMemoryPool},
});
// Device memory as output.
- testAvailableSinceV1_3(Request{
+ testAvailableSinceV1_3(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {sharedMemoryPool, deviceMemoryPool},
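
The compliance rule these two tests encode, in miniature: a request is V1_2-only if any pool is an AHardwareBuffer, and V1_3-only if any pool is a device-memory token. A sketch with stand-in pool types (the real Request::MemoryPool is a HIDL safe_union, not a std::variant):

    #include <cstdint>
    #include <variant>
    #include <vector>

    struct SharedMemory {};
    struct HardwareBuffer {};
    using DeviceMemoryToken = uint32_t;
    using MemoryPool = std::variant<SharedMemory, HardwareBuffer, DeviceMemoryToken>;

    enum class MinVersion { V1_0, V1_2, V1_3 };

    // Return the oldest HAL version that can express a request with these pools.
    MinVersion minVersionFor(const std::vector<MemoryPool>& pools) {
        MinVersion v = MinVersion::V1_0;
        for (const auto& pool : pools) {
            if (std::holds_alternative<DeviceMemoryToken>(pool)) return MinVersion::V1_3;
            if (std::holds_alternative<HardwareBuffer>(pool)) v = MinVersion::V1_2;
        }
        return v;
    }
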
diff --git a/nn/runtime/test/TestExecution.cpp b/nn/runtime/test/TestExecution.cpp
index 3441f9fc4..5f012c3eb 100644
--- a/nn/runtime/test/TestExecution.cpp
+++ b/nn/runtime/test/TestExecution.cpp
@@ -38,49 +38,54 @@
namespace android {
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
using HidlModel = V1_3::Model;
using PreparedModelCallback = nn::PreparedModelCallback;
-using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
using WrapperCompilation = nn::test_wrapper::Compilation;
using WrapperEvent = nn::test_wrapper::Event;
using WrapperExecution = nn::test_wrapper::Execution;
using WrapperModel = nn::test_wrapper::Model;
using WrapperOperandType = nn::test_wrapper::OperandType;
+using WrapperResult = nn::test_wrapper::Result;
using WrapperType = nn::test_wrapper::Type;
using nn::convertToV1_0;
+using nn::convertToV1_3;
+using nn::ErrorStatus;
template <typename T>
using MQDescriptorSync = hardware::MQDescriptorSync<T>;
namespace {
-const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
// Wraps the latest version of IPreparedModel to allow dummying up the execution status
// and controlling when the execution finishes.
-class TestPreparedModelLatest : public IPreparedModel {
+class TestPreparedModelLatest : public V1_3::IPreparedModel {
public:
// If errorStatus is NONE, then execute behaves normally (and sends back
// the actual execution status). Otherwise, don't bother to execute, and
// just send back errorStatus (as the execution status, not the launch
// status).
- TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mPreparedModelV1_0(preparedModel),
mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
mErrorStatus(errorStatus) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_0 != nullptr) << "V1_0 prepared model is nullptr.";
std::thread([this, request, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_0->execute(request, callback);
} else {
@@ -90,16 +95,17 @@ class TestPreparedModelLatest : public IPreparedModel {
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
std::thread([this, request, measure, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_2->execute_1_2(request, measure, callback);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
callback->notify_1_2(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
} else {
callback->notify_1_2(convertToV1_0(mErrorStatus), {}, kBadTiming);
@@ -108,19 +114,20 @@ class TestPreparedModelLatest : public IPreparedModel {
return V1_0::ErrorStatus::NONE;
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<V1_3::IExecutionCallback>& callback) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
std::thread([this, request, measure, deadline, loopTimeoutDuration, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_3->execute_1_3(request, measure, deadline,
loopTimeoutDuration, callback);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
callback->notify_1_3(mErrorStatus, {shape}, kBadTiming);
} else {
callback->notify_1_3(mErrorStatus, {}, kBadTiming);
@@ -129,53 +136,55 @@ class TestPreparedModelLatest : public IPreparedModel {
return V1_3::ErrorStatus::NONE;
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_2->executeSynchronously(request, measure, cb);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
cb(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
- return Void();
+ return hardware::Void();
} else {
cb(convertToV1_0(mErrorStatus), {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> executeSynchronously_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ executeSynchronously_1_3_cb cb) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_3->executeSynchronously_1_3(request, measure, deadline,
loopTimeoutDuration, cb);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
cb(mErrorStatus, {shape}, kBadTiming);
- return Void();
+ return hardware::Void();
} else {
cb(mErrorStatus, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_2->configureExecutionBurst(callback, requestChannel,
resultChannel, cb);
} else {
cb(convertToV1_0(mErrorStatus), nullptr);
- return Void();
+ return hardware::Void();
}
}
@@ -184,25 +193,27 @@ class TestPreparedModelLatest : public IPreparedModel {
// SampleDriver is written with that in mind. Therefore, this
// implementation is also synchronous. If the SampleDriver is updated to
// return a real sync fence, this must be updated.
- Return<void> executeFenced(const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration,
- executeFenced_cb cb) override {
+ hardware::Return<void> executeFenced(const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration,
+ executeFenced_cb cb) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
- CHECK(mErrorStatus != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE)
+ CHECK(mErrorStatus != V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE)
<< "executeFenced does not support dynamic output shape";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_3->executeFenced(request, waitFor, measure, deadline,
loopTimeoutDuration, duration, cb);
} else {
// Due to the limitations of the SampleDriver, all failures look
// like launch failures. If the SampleDriver is updated to return
// real sync fences, this must be updated.
- cb(mErrorStatus, hidl_handle(nullptr), nullptr);
+ cb(mErrorStatus, hardware::hidl_handle(nullptr), nullptr);
}
- return Void();
+ return hardware::Void();
}
// We can place the TestPreparedModelLatest system in a "pause" mode where
@@ -225,7 +236,7 @@ class TestPreparedModelLatest : public IPreparedModel {
const sp<V1_0::IPreparedModel> mPreparedModelV1_0;
const sp<V1_2::IPreparedModel> mPreparedModelV1_2;
const sp<V1_3::IPreparedModel> mPreparedModelV1_3;
- ErrorStatus mErrorStatus;
+ V1_3::ErrorStatus mErrorStatus;
static std::atomic<bool> mPauseExecutions;
static std::atomic<unsigned int> mExecutionsInFlight;
@@ -245,25 +256,27 @@ using TestPreparedModel13 = TestPreparedModelLatest;
// Like TestPreparedModelLatest, but implementing 1.2
class TestPreparedModel12 : public V1_2::IPreparedModel {
public:
- TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute_1_2(request, measure, callback);
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
return mLatestPreparedModel->executeSynchronously(request, measure, cb);
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -273,22 +286,22 @@ class TestPreparedModel12 : public V1_2::IPreparedModel {
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Like TestPreparedModelLatest, but implementing 1.0
class TestPreparedModel10 : public V1_0::IPreparedModel {
public:
- TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Behaves like SampleDriver, except that it produces a wrapped IPreparedModel.
@@ -300,13 +313,13 @@ class TestDriver13 : public SampleDriver {
// status). Otherwise, don't bother to execute, and just send
// back errorStatus (as the execution status, not the launch
// status).
- TestDriver13(const std::string& name, ErrorStatus errorStatus)
+ TestDriver13(const std::string& name, V1_3::ErrorStatus errorStatus)
: SampleDriver(name.c_str()), mErrorStatus(errorStatus) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance =
@@ -314,41 +327,43 @@ class TestDriver13 : public SampleDriver {
.ifPerformance = kPerf,
.whilePerformance = kPerf};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
+ hardware::Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
model, preference, priority, deadline, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
}
- if (prepareModelReturn != ErrorStatus::NONE) {
+ if (prepareModelReturn != V1_3::ErrorStatus::NONE) {
actualCallback->notify_1_3(
- localCallback->getStatus(),
+ convertToV1_3(localCallback->getStatus()),
V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
return prepareModelReturn;
}
localCallback->wait();
if (localCallback->getStatus() != ErrorStatus::NONE) {
actualCallback->notify_1_3(
- localCallback->getStatus(),
+ convertToV1_3(localCallback->getStatus()),
V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
} else {
actualCallback->notify_1_3(
@@ -358,13 +373,14 @@ class TestDriver13 : public SampleDriver {
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
+ hardware::Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
model, preference, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
@@ -388,11 +404,11 @@ class TestDriver13 : public SampleDriver {
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_0::ErrorStatus> prepareModelReturn =
+ hardware::Return<V1_0::ErrorStatus> prepareModelReturn =
SampleDriver::prepareModel_1_1(model, preference, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
@@ -414,75 +430,79 @@ class TestDriver13 : public SampleDriver {
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
- actualCallback);
+ return prepareModel_1_1(nn::convertToV1_1(model),
+ V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, actualCallback);
}
private:
- ErrorStatus mErrorStatus;
+ V1_3::ErrorStatus mErrorStatus;
};
// Like TestDriver, but implementing 1.2
class TestDriver12 : public V1_2::IDevice {
public:
- TestDriver12(const std::string& name, ErrorStatus errorStatus)
+ TestDriver12(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
@@ -493,29 +513,29 @@ class TestDriver12 : public V1_2::IDevice {
// Like TestDriver, but implementing 1.1
class TestDriver11 : public V1_1::IDevice {
public:
- TestDriver11(const std::string& name, ErrorStatus errorStatus)
+ TestDriver11(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -528,21 +548,21 @@ class TestDriver11 : public V1_1::IDevice {
// Like TestDriver, but implementing 1.0
class TestDriver10 : public V1_0::IDevice {
public:
- TestDriver10(const std::string& name, ErrorStatus errorStatus)
+ TestDriver10(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
@@ -560,7 +580,7 @@ class TestCompilation : public WrapperCompilation {
// Otherwise, don't bother to execute, and just send back
// errorStatus (as the execution status, not the launch status).
TestCompilation(const WrapperModel* model, const std::string& deviceName,
- ErrorStatus errorStatus) {
+ V1_3::ErrorStatus errorStatus) {
std::vector<std::shared_ptr<Device>> devices;
auto device = DeviceManager::forTest_makeDriverDevice(
deviceName, new DriverClass(deviceName, errorStatus));
@@ -613,7 +633,7 @@ class TestIntrospectionCompilation : public WrapperCompilation {
template <class DriverClass>
class ExecutionTestTemplate
- : public ::testing::TestWithParam<std::tuple<ErrorStatus, Result, bool>> {
+ : public ::testing::TestWithParam<std::tuple<V1_3::ErrorStatus, WrapperResult, bool>> {
public:
ExecutionTestTemplate()
: kName(toString(std::get<0>(GetParam()))),
@@ -648,11 +668,11 @@ class ExecutionTestTemplate
// sends back the actual execution status). Otherwise, don't
// bother to execute, and just send back kForceErrorStatus (as the
// execution status, not the launch status).
- const ErrorStatus kForceErrorStatus;
+ const V1_3::ErrorStatus kForceErrorStatus;
- // What result do we expect from the execution? (The Result
+ // What result do we expect from the execution? (The WrapperResult
// equivalent of kForceErrorStatus.)
- const Result kExpectResult;
+ const WrapperResult kExpectResult;
// Whether mCompilation is created via Introspection API or not.
const bool kUseIntrospectionAPI;
@@ -663,8 +683,10 @@ class ExecutionTestTemplate
void setInputOutput(WrapperExecution* execution) {
mInputBuffer = kInputBuffer;
mOutputBuffer = kOutputBufferInitial;
- ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)), Result::NO_ERROR);
- ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)), Result::NO_ERROR);
+ ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)),
+ WrapperResult::NO_ERROR);
+ ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)),
+ WrapperResult::NO_ERROR);
}
const float kInputBuffer = 3.14;
@@ -683,7 +705,7 @@ class ExecutionTestTemplate
uint32_t output = model.addOperand(&tensorType);
model.addOperation(ANEURALNETWORKS_FLOOR, {input}, {output});
model.identifyInputsAndOutputs({input}, {output});
- assert(model.finish() == Result::NO_ERROR);
+ assert(model.finish() == WrapperResult::NO_ERROR);
return model;
}
@@ -697,13 +719,13 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
GTEST_SKIP();
}
- ASSERT_EQ(mCompilation.finish(), Result::NO_ERROR);
+ ASSERT_EQ(mCompilation.finish(), WrapperResult::NO_ERROR);
const auto getDimensionsWhileRunning = [](WrapperExecution& execution) {
TestPreparedModelLatest::waitForExecutionToBegin();
// Cannot query dimensions while execution is running
std::vector<uint32_t> dimensions;
- EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), WrapperResult::BAD_STATE);
};
{
@@ -712,21 +734,22 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution));
TestPreparedModelLatest::pauseExecutions(true);
WrapperEvent event;
- ASSERT_EQ(execution.startCompute(&event), Result::NO_ERROR);
+ ASSERT_EQ(execution.startCompute(&event), WrapperResult::NO_ERROR);
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
ASSERT_EQ(event.wait(), kExpectResult);
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
{
@@ -738,17 +761,18 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
{
@@ -767,20 +791,21 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
- if (kExpectResult != Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult != WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// computeWithDependencies doesn't support OUTPUT_INSUFFICIENT_SIZE
SCOPED_TRACE("computeWithDependencies");
WrapperExecution execution(&mCompilation);
@@ -796,32 +821,35 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(event.wait(), kExpectResult);
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
} else {
- ASSERT_EQ(event.wait(), Result::UNEXPECTED_NULL);
+ ASSERT_EQ(event.wait(), WrapperResult::UNEXPECTED_NULL);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
}
auto kTestValues = ::testing::Values(
- std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE,
+ std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR,
+ /* kUseIntrospectionAPI */ false),
+ std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED,
+ std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE,
+ std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+ WrapperResult::OUTPUT_INSUFFICIENT_SIZE,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
+ std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA,
/* kUseIntrospectionAPI */ false));
class ExecutionTest13 : public ExecutionTestTemplate<TestDriver13> {};
@@ -838,27 +866,29 @@ INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest12, kTestValues);
class ExecutionTest11 : public ExecutionTestTemplate<TestDriver11> {};
TEST_P(ExecutionTest11, Wait) {
- if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
+ if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
TestWait();
}
INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest11, kTestValues);
class ExecutionTest10 : public ExecutionTestTemplate<TestDriver10> {};
TEST_P(ExecutionTest10, Wait) {
- if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
+ if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
TestWait();
}
INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest10, kTestValues);
auto kIntrospectionTestValues = ::testing::Values(
- std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE,
+ std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR,
+ /* kUseIntrospectionAPI */ true),
+ std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED,
+ std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE,
+ std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+ WrapperResult::OUTPUT_INSUFFICIENT_SIZE,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
+ std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA,
/* kUseIntrospectionAPI */ true));
INSTANTIATE_TEST_SUITE_P(IntrospectionFlavor, ExecutionTest13, kIntrospectionTestValues);
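
TestWait() above leans on the "pause" mode of TestPreparedModelLatest: every execution path calls dummyExecution(), which holds the execution in flight while paused so the test can assert that getOutputOperandDimensions returns WrapperResult::BAD_STATE mid-run, then releases it. A rough sketch of that gating, built from the two static atomics visible in the class; the exact names here are assumptions based on the fragments above:

    #include <atomic>
    #include <thread>

    // Sketch only: executions busy-wait while paused, and a test thread
    // can block until at least one execution has actually started.
    struct PauseGate {
        static void pauseExecutions(bool pause) { paused = pause; }

        static void waitForExecutionToBegin() {
            while (inFlight == 0) std::this_thread::yield();
        }

        // Called at the top of every execution path.
        static void dummyExecution() {
            ++inFlight;
            while (paused) std::this_thread::yield();
            --inFlight;
        }

        static std::atomic<bool> paused;
        static std::atomic<unsigned int> inFlight;
    };
    std::atomic<bool> PauseGate::paused{false};
    std::atomic<unsigned int> PauseGate::inFlight{0};
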
diff --git a/nn/runtime/test/TestExtensions.cpp b/nn/runtime/test/TestExtensions.cpp
index f104854b9..da13073e2 100644
--- a/nn/runtime/test/TestExtensions.cpp
+++ b/nn/runtime/test/TestExtensions.cpp
@@ -32,7 +32,9 @@ using DeviceManager = ::android::nn::DeviceManager;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
using TypeManager = ::android::nn::TypeManager;
-using namespace android::nn::hal;
+namespace hardware = ::android::hardware;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
const char* kTestDriverName = "extensions-test-driver";
const char* kTestExtension1 = "vendor.test.one";
@@ -44,23 +46,24 @@ class TestDriver : public SampleDriver {
TestDriver() : SampleDriver(kTestDriverName) {}
~TestDriver() override {}
- Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
cb(V1_0::ErrorStatus::NONE, {
{.name = kTestExtension1},
{.name = kTestExtension2},
{.name = kTestExtension3},
});
- return Void();
+ return hardware::Void();
}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_1_3_cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model&,
+ getSupportedOperations_1_3_cb) override {
CHECK(false) << "not implemented";
- return Void();
+ return hardware::Void();
}
};
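
This file shows the mechanical core of the migration in its simplest form: the blanket using-directive is replaced by explicit namespace aliases, and every HAL name gains a version qualifier. Roughly:

    // Before: all HAL types were pulled in unqualified.
    using namespace android::nn::hal;
    Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_1_3_cb);

    // After: aliases make the HAL version of every type explicit.
    namespace hardware = ::android::hardware;
    namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
    hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model&,
                                                      getSupportedOperations_1_3_cb);
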
diff --git a/nn/runtime/test/TestFailingDriver.cpp b/nn/runtime/test/TestFailingDriver.cpp
index 7d41ace20..d2e30a656 100644
--- a/nn/runtime/test/TestFailingDriver.cpp
+++ b/nn/runtime/test/TestFailingDriver.cpp
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include <algorithm>
#include <memory>
#include <vector>
@@ -28,7 +29,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using sample_driver::SampleDriverPartial;
using Result = test_wrapper::Result;
using WrapperOperandType = test_wrapper::OperandType;
@@ -50,20 +50,21 @@ class FailingTestDriver : public SampleDriverPartial {
// EmptyOperationResolver causes execution to fail.
FailingTestDriver() : SampleDriverPartial(kTestDriverName, &mEmptyOperationResolver) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE,
- {.operandPerformance = {{.type = OperandType::TENSOR_FLOAT32,
+ {.operandPerformance = {{.type = V1_3::OperandType::TENSOR_FLOAT32,
.info = {.execTime = 0.1, // Faster than CPU.
.powerUsage = 0.1}}}});
- return Void();
+ return hardware::Void();
}
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override {
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override {
std::vector<bool> supported(model.main.operations.size());
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [](const Operation& operation) { return operation.type == OperationType::SQRT; });
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [](const V1_3::Operation& operation) {
+ return operation.type == V1_3::OperationType::SQRT;
+ });
return supported;
}
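
FailingTestDriver advertises support only for SQRT, at a performance point deliberately better than the CPU's so the runtime will choose it, and then fails at execution time. The std::transform in getSupportedOperationsImpl generalizes to any allow-list of operation types; a hedged sketch (supportedOps is an illustrative helper, not part of this change):

    #include <algorithm>
    #include <initializer_list>
    #include <vector>

    namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;

    // Marks exactly the operations whose type appears in the allow-list.
    std::vector<bool> supportedOps(const V1_3::Model& model,
                                   std::initializer_list<V1_3::OperationType> allowed) {
        std::vector<bool> supported(model.main.operations.size());
        std::transform(model.main.operations.begin(), model.main.operations.end(),
                       supported.begin(), [&](const V1_3::Operation& op) {
                           return std::find(allowed.begin(), allowed.end(),
                                            op.type) != allowed.end();
                       });
        return supported;
    }
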
diff --git a/nn/runtime/test/TestIntrospectionControl.cpp b/nn/runtime/test/TestIntrospectionControl.cpp
index 972619ef5..abb7e3306 100644
--- a/nn/runtime/test/TestIntrospectionControl.cpp
+++ b/nn/runtime/test/TestIntrospectionControl.cpp
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include <algorithm>
#include <chrono>
#include <iterator>
#include <map>
@@ -41,7 +42,10 @@
namespace {
using namespace ::android;
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
@@ -63,40 +67,42 @@ using nn::convertToV1_3;
template <typename T>
using MQDescriptorSync = hardware::MQDescriptorSync<T>;
-constexpr Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-constexpr Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456};
-constexpr Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56};
+constexpr V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+constexpr V1_2::Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456};
+constexpr V1_2::Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56};
// This is an IDevice for testing purposes. The test driver has customized
// getCapabilities_1_3 and getSupportedOperations_1_3.
class TestDriver : public SampleDriver {
public:
- TestDriver(const char* name, Capabilities capabilities, const std::vector<bool>& supportedOps)
+ TestDriver(const char* name, V1_3::Capabilities capabilities,
+ const std::vector<bool>& supportedOps)
: SampleDriver(name), mCapabilities(capabilities), mSupportedOps(supportedOps) {}
~TestDriver() override {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, mCapabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
- return Void();
+ return hardware::Void();
}
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [this](Operation op) { return mSupportedOps[static_cast<int32_t>(op.type)]; });
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [this](V1_3::Operation op) {
+ return mSupportedOps[static_cast<int32_t>(op.type)];
+ });
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
private:
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
std::vector<bool> mSupportedOps;
};
@@ -119,7 +125,7 @@ class IntrospectionControlTest : public ::testing::Test {
struct DeviceSpecification {
DeviceSpecification(const std::string& name, float perf, std::vector<bool>& supportedOps)
: mName(name), mSupportedOps(supportedOps) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
mCapabilities = {
.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
@@ -129,7 +135,7 @@ class IntrospectionControlTest : public ::testing::Test {
.whilePerformance = perfInfo};
}
std::string mName;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
std::vector<bool> mSupportedOps;
};
@@ -383,14 +389,14 @@ std::ostream& operator<<(std::ostream& os, Success success) {
// Returns (unfenced timing, fenced timing).
// Not for PASS_CPU.
-std::pair<Timing, Timing> getExpectedTiming(Success s, bool fencedExecution) {
+std::pair<V1_2::Timing, V1_2::Timing> getExpectedTiming(Success s, bool fencedExecution) {
CHECK_NE(s, Success::PASS_CPU);
if (!hasBit(s, Success::PASS_BIT)) {
return {kBadTiming, kBadTiming};
}
- std::pair<Timing, Timing> result;
+ std::pair<V1_2::Timing, V1_2::Timing> result;
result.first.timeOnDevice = hasBit(s, Success::PASS_UNFENCED_DEVICE_BIT)
? kGoodUnfencedTiming.timeOnDevice
: UINT64_MAX;
@@ -416,12 +422,12 @@ std::pair<Timing, Timing> getExpectedTiming(Success s, bool fencedExecution) {
class TestPreparedModelLatest : public SamplePreparedModel {
public:
TestPreparedModelLatest(const HidlModel& model, const SampleDriver* driver, Success success)
- : SamplePreparedModel(model, driver, ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{},
- kDefaultPriority),
+ : SamplePreparedModel(model, driver, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{},
+ nn::kDefaultPriority13),
mSuccess(success) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request&, const sp<V1_0::IExecutionCallback>& callback) override {
switch (mSuccess) {
case Success::PASS_NEITHER:
std::thread([callback] {
@@ -445,9 +451,10 @@ class TestPreparedModelLatest : public SamplePreparedModel {
}
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request&, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
switch (mSuccess) {
case Success::PASS_NEITHER:
case Success::PASS_DEVICE:
@@ -475,17 +482,18 @@ class TestPreparedModelLatest : public SamplePreparedModel {
}
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const sp<V1_3::IExecutionCallback>& callback) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request&, V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const sp<V1_3::IExecutionCallback>& callback) override {
// Use a placeholder V1_0::Request because execute_1_2 ignores request entirely.
const V1_0::ErrorStatus status = execute_1_2(V1_0::Request{}, measure, callback);
return convertToV1_3(status);
}
- Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming measure,
- executeSynchronously_cb cb) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
switch (mSuccess) {
case Success::PASS_NEITHER:
case Success::PASS_DEVICE:
@@ -493,7 +501,7 @@ class TestPreparedModelLatest : public SamplePreparedModel {
case Success::PASS_BOTH:
dummyExecution();
cb(V1_0::ErrorStatus::NONE, {}, getExpectedTiming(mSuccess, false).first);
- return Void();
+ return hardware::Void();
case Success::FAIL_WAIT:
// While this is a synchronous execution method, the NNAPI
// runtime may call it even for asynchronous execution, so we
@@ -503,19 +511,22 @@ class TestPreparedModelLatest : public SamplePreparedModel {
case Success::FAIL_LAUNCH:
dummyExecution();
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
default:
ADD_FAILURE() << "Unexpected Success kind";
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeSynchronously_1_3_cb cb) override {
const auto wrappedCb = [&cb](V1_0::ErrorStatus status,
- const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
cb(convertToV1_3(status), outputShapes, timing);
};
// Use a placeholder V1_0::Request because executeSynchronously ignores request entirely.
@@ -525,7 +536,7 @@ class TestPreparedModelLatest : public SamplePreparedModel {
// ExecutionBurstServer::create has an overload that will use
// IPreparedModel::executeSynchronously(), so we can rely on that, rather
// than having to implement ExecutionBurstServer::IExecutorWithCache.
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -534,21 +545,26 @@ class TestPreparedModelLatest : public SamplePreparedModel {
callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
cb(burst == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, burst);
- return Void();
+ return hardware::Void();
}
- Return<void> executeFenced(const Request&, const hidl_vec<hidl_handle>&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const OptionalTimeoutDuration&, executeFenced_cb callback) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<void> executeFenced(const V1_3::Request&,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeFenced_cb callback) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
if (hasBit(mSuccess, Success::PASS_BIT)) {
dummyExecution();
const auto expectedTiming = getExpectedTiming(mSuccess, true);
sp<SampleFencedExecutionCallback> fencedExecutionCallback =
new SampleFencedExecutionCallback(expectedTiming.first, expectedTiming.second,
V1_3::ErrorStatus::NONE);
- callback(V1_3::ErrorStatus::NONE, hidl_handle(nullptr), fencedExecutionCallback);
- return Void();
+ callback(V1_3::ErrorStatus::NONE, hardware::hidl_handle(nullptr),
+ fencedExecutionCallback);
+ return hardware::Void();
}
switch (mSuccess) {
case Success::FAIL_WAIT:
@@ -559,11 +575,12 @@ class TestPreparedModelLatest : public SamplePreparedModel {
FALLTHROUGH_INTENDED;
case Success::FAIL_LAUNCH:
dummyExecution();
- callback(V1_3::ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
- return Void();
+ callback(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr),
+ nullptr);
+ return hardware::Void();
default:
ADD_FAILURE() << "Unexpected Success kind";
- return Void();
+ return hardware::Void();
}
}
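
Everything in executeFenced above keys off bit tests on Success (PASS_BIT, PASS_UNFENCED_DEVICE_BIT, and so on), with getExpectedTiming assembling the unfenced/fenced Timing pair from the same bits. A minimal sketch of the bit test this relies on; the enumerator values are assumptions, only the mask-and-test idea is taken from the code:

    #include <cstdint>

    enum class Success : uint32_t {
        PASS_BIT = 1u << 0,
        PASS_UNFENCED_DEVICE_BIT = 1u << 1,
        // Further flavor bits elided.
    };

    // Assumed shape of the hasBit helper used by getExpectedTiming and
    // executeFenced: a plain mask test on the underlying integer.
    constexpr bool hasBit(Success s, Success bit) {
        return (static_cast<uint32_t>(s) & static_cast<uint32_t>(bit)) != 0;
    }
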
@@ -607,22 +624,24 @@ class TestPreparedModel12 : public V1_2::IPreparedModel {
TestPreparedModel12(const HidlModel& model, const SampleDriver* driver, Success success)
: mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute_1_2(request, measure, callback);
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
return mLatestPreparedModel->executeSynchronously(request, measure, cb);
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -632,7 +651,7 @@ class TestPreparedModel12 : public V1_2::IPreparedModel {
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Like TestPreparedModelLatest, but implementing 1.0
@@ -641,13 +660,13 @@ class TestPreparedModel10 : public V1_0::IPreparedModel {
TestPreparedModel10(const HidlModel& model, const SampleDriver* driver, Success success)
: mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Behaves like SampleDriver, except that it produces a customized IPreparedModel.
@@ -656,31 +675,31 @@ class TestDriver13 : public SampleDriver {
TestDriver13(const std::string& name, Success success)
: SampleDriver(name.c_str()), mSuccess(success) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance =
nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.operations.size(), true);
cb(V1_0::ErrorStatus::NONE, supported);
@@ -688,39 +707,41 @@ class TestDriver13 : public SampleDriver {
std::vector<bool> supported;
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, supported);
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference, Priority, const OptionalTimePoint&,
- const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference, V1_3::Priority,
+ const V1_3::OptionalTimePoint&, const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) override {
callback->notify_1_3(V1_3::ErrorStatus::NONE,
new TestPreparedModel13(model, this, mSuccess));
return V1_3::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) override {
callback->notify_1_2(V1_0::ErrorStatus::NONE,
new TestPreparedModel12(nn::convertToV1_3(model), this, mSuccess));
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference,
const sp<V1_0::IPreparedModelCallback>& callback) override {
callback->notify(V1_0::ErrorStatus::NONE,
new TestPreparedModel10(nn::convertToV1_3(model), this, mSuccess));
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override {
- return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
- callback);
+ return prepareModel_1_1(nn::convertToV1_1(model),
+ V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, callback);
}
private:
@@ -732,27 +753,27 @@ class TestDriver11 : public V1_1::IDevice {
public:
TestDriver11(const std::string& name, Success success)
: mLatestDriver(new TestDriver13(name, success)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
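The whole file follows one mechanical pattern: unqualified Return, Void(), and HAL type names are replaced by hardware::Return, hardware::Void(), and V1_x-qualified types, while the forwarding logic stays untouched. A condensed before/after sketch (mLatestDriver as above):

// Before (compiled only via `using namespace hal`):
//   Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
// After: every name carries its owning namespace.
hardware::Return<V1_0::DeviceStatus> getStatus() override {
    return mLatestDriver->getStatus();
}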
diff --git a/nn/runtime/test/TestMemoryDomain.cpp b/nn/runtime/test/TestMemoryDomain.cpp
index 06418e5af..35a826ab8 100644
--- a/nn/runtime/test/TestMemoryDomain.cpp
+++ b/nn/runtime/test/TestMemoryDomain.cpp
@@ -34,20 +34,22 @@
#include "TestUtils.h"
using namespace android::nn;
-using namespace hal;
-using Result = test_wrapper::Result;
+namespace hardware = android::hardware;
+using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
+using android::sp;
namespace {
// A test buffer that does nothing.
-class TestBuffer : public IBuffer {
+class TestBuffer : public V1_3::IBuffer {
public:
- Return<ErrorStatus> copyTo(const hidl_memory&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
+ hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory&) override {
+ return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<ErrorStatus> copyFrom(const hidl_memory&, const hidl_vec<uint32_t>&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
+ hardware::Return<V1_3::ErrorStatus> copyFrom(const hardware::hidl_memory&,
+ const hardware::hidl_vec<uint32_t>&) override {
+ return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
};
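For context on the copyTo/copyFrom signatures above: a hardware::Return<T> carries a binder transport status alongside the payload, so callers check both. A caller-side sketch under standard libhidl semantics (variable names are illustrative):

hardware::Return<V1_3::ErrorStatus> ret = buffer->copyTo(memory);
if (!ret.isOk()) {
    // Transport failure; the payload is not meaningful.
} else if (V1_3::ErrorStatus status = ret; status != V1_3::ErrorStatus::NONE) {
    // The call was delivered, but the buffer reported a HAL error.
}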
@@ -73,64 +75,67 @@ std::ostream& operator<<(std::ostream& os, AllocateReturn allocateReturn) {
class TestDriverLatest : public sample_driver::SampleDriver {
public:
- TestDriverLatest(const char* name, std::set<OperationType> supportedOperations,
+ TestDriverLatest(const char* name, std::set<V1_3::OperationType> supportedOperations,
AllocateReturn allocateReturn)
: SampleDriver(name),
kSupportedOperations(std::move(supportedOperations)),
kAllocateReturn(allocateReturn) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
// Faster than the CPU.
- const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
- const Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ const V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
// The tests will never use a referenced model.
CHECK(model.referenced.size() == 0);
std::vector<bool> supported(model.main.operations.size(), false);
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [this](const Operation& op) { return kSupportedOperations.count(op.type) > 0; });
- cb(ErrorStatus::NONE, supported);
- return Void();
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [this](const V1_3::Operation& op) {
+ return kSupportedOperations.count(op.type) > 0;
+ });
+ cb(V1_3::ErrorStatus::NONE, supported);
+ return hardware::Void();
}
- Return<void> allocate(const BufferDesc&, const hidl_vec<sp<IPreparedModel>>&,
- const hidl_vec<BufferRole>&, const hidl_vec<BufferRole>&,
- allocate_cb cb) override {
+ hardware::Return<void> allocate(const V1_3::BufferDesc&,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>&,
+ const hardware::hidl_vec<V1_3::BufferRole>&,
+ const hardware::hidl_vec<V1_3::BufferRole>&,
+ allocate_cb cb) override {
switch (kAllocateReturn) {
case AllocateReturn::OK:
- cb(ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::BAD_IBUFFER:
- cb(ErrorStatus::NONE, nullptr, mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, nullptr, mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::BAD_TOKEN:
- cb(ErrorStatus::NONE, new TestBuffer(), 0);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, new TestBuffer(), 0);
+ return hardware::Void();
case AllocateReturn::BAD_STATUS:
- cb(ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::NOT_SUPPORTED:
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ return hardware::Void();
}
LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn);
- return Void();
+ return hardware::Void();
}
private:
- const std::set<OperationType> kSupportedOperations;
+ const std::set<V1_3::OperationType> kSupportedOperations;
const AllocateReturn kAllocateReturn;
uint32_t mValidBufferToken = 1;
};
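TestDriverLatest::allocate() returns one deliberately malformed response per AllocateReturn value, so the runtime's validation of the returned IBuffer, token, and status can each be exercised in isolation. A registration sketch in the style of these tests (the forTest_ hook is the runtime's test-only registration path; treat the exact signature as an assumption):

sp<TestDriverLatest> driver = new TestDriverLatest(
        "test_driver", {V1_3::OperationType::ADD}, AllocateReturn::BAD_TOKEN);
DeviceManager::get()->forTest_registerDevice("test_driver", driver);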
@@ -160,7 +165,7 @@ void createTestModel(test_wrapper::Model* model) {
model->addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp});
model->addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1});
model->identifyInputsAndOutputs({input0, input1, input2}, {output0, output1});
- EXPECT_EQ(model->finish(), Result::NO_ERROR);
+ EXPECT_EQ(model->finish(), WrapperResult::NO_ERROR);
}
class MemoryDomainTestBase : public ::testing::Test {
@@ -199,14 +204,14 @@ class MemoryDomainTestBase : public ::testing::Test {
std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size());
std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(),
[&deviceMap](const std::string& name) { return deviceMap.at(name); });
- Result result;
+ WrapperResult result;
std::tie(result, compilation) =
test_wrapper::Compilation::createForDevices(&mModel, devices);
- EXPECT_EQ(result, Result::NO_ERROR);
+ EXPECT_EQ(result, WrapperResult::NO_ERROR);
} else {
compilation = test_wrapper::Compilation(&mModel);
}
- EXPECT_EQ(compilation.finish(), Result::NO_ERROR);
+ EXPECT_EQ(compilation.finish(), WrapperResult::NO_ERROR);
return compilation;
}
@@ -245,7 +250,8 @@ class MemoryDomainTest : public MemoryDomainTestBase,
public ::testing::WithParamInterface<MemoryDomainTestParam> {
protected:
// If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED.
- void createAndRegisterDriver(const char* name, std::set<OperationType> supportedOperations,
+ void createAndRegisterDriver(const char* name,
+ std::set<V1_3::OperationType> supportedOperations,
AllocateReturn allocateReturn) {
sp<V1_0::IDevice> driver;
if (kUseV1_2Driver) {
@@ -275,9 +281,10 @@ class MemoryDomainTest : public MemoryDomainTestBase,
// Test device memory allocation on a compilation with only a single partition.
TEST_P(MemoryDomainTest, SinglePartition) {
- createAndRegisterDriver("test_driver",
- {OperationType::ADD, OperationType::SUB, OperationType::MUL},
- kAllocateReturn);
+ createAndRegisterDriver(
+ "test_driver",
+ {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
+ kAllocateReturn);
auto compilation = createCompilation({"test_driver"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -285,7 +292,7 @@ TEST_P(MemoryDomainTest, SinglePartition) {
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
@@ -295,7 +302,7 @@ TEST_P(MemoryDomainTest, SinglePartition) {
} else {
// The memory should fall back to ashmem or a blob AHardwareBuffer, depending on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -311,9 +318,9 @@ TEST_P(MemoryDomainTest, SinglePartition) {
// Test device memory allocation on a compilation with multiple partitions.
TEST_P(MemoryDomainTest, MultiplePartitions) {
- createAndRegisterDriver("test_driver_add", {OperationType::ADD}, kAllocateReturn);
- createAndRegisterDriver("test_driver_sub", {OperationType::SUB}, kAllocateReturn);
- createAndRegisterDriver("test_driver_mul", {OperationType::MUL}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_add", {V1_3::OperationType::ADD}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_sub", {V1_3::OperationType::SUB}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_mul", {V1_3::OperationType::MUL}, kAllocateReturn);
auto compilation = createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -323,7 +330,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
@@ -333,7 +340,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
} else {
// The memory should fall back to ashmem or a blob AHardwareBuffer, depending on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -357,7 +364,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
} else {
// The memory should fall back to ashmem or a blob AHardwareBuffer, depending on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -380,7 +387,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
} else {
// The memory should fall back to ashmem or a blob AHardwareBuffer, depending on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -396,9 +403,10 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
// Test device memory allocation with dynamic shape.
TEST_P(MemoryDomainTest, DynamicShape) {
- createAndRegisterDriver("test_driver",
- {OperationType::ADD, OperationType::SUB, OperationType::MUL},
- kAllocateReturn);
+ createAndRegisterDriver(
+ "test_driver",
+ {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
+ kAllocateReturn);
auto compilation = createCompilation({"test_driver"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -406,7 +414,7 @@ TEST_P(MemoryDomainTest, DynamicShape) {
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index d85717ce7..939612a78 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -145,7 +145,11 @@
namespace {
-using namespace android::nn::hal;
+namespace hardware = android::hardware;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = ::android::nn::CompilationBuilder;
using Deadline = ::android::nn::Deadline;
using Device = ::android::nn::Device;
@@ -154,10 +158,13 @@ using ExecutePreference = ::android::nn::test_wrapper::ExecutePreference;
using ExecutePriority = ::android::nn::test_wrapper::ExecutePriority;
using ExecutionPlan = ::android::nn::ExecutionPlan;
using ExecutionStep = ::android::nn::ExecutionStep;
+using HalCacheToken = ::android::nn::HalCacheToken;
using HalVersion = ::android::nn::HalVersion;
using HidlModel = V1_3::Model;
using LogicalStep = ::android::nn::LogicalStep;
using ModelBuilder = ::android::nn::ModelBuilder;
+using Operand = ::android::nn::Operand;
+using Operation = ::android::nn::Operation;
using Result = ::android::nn::test_wrapper::Result;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
using WrapperCompilation = ::android::nn::test_wrapper::Compilation;
@@ -166,9 +173,10 @@ using WrapperModel = ::android::nn::test_wrapper::Model;
using WrapperOperandType = ::android::nn::test_wrapper::OperandType;
using WrapperSymmPerChannelQuantParams = ::android::nn::test_wrapper::SymmPerChannelQuantParams;
using WrapperType = ::android::nn::test_wrapper::Type;
+using android::sp;
-Capabilities makeCapabilities(float perf) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+V1_3::Capabilities makeCapabilities(float perf) {
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
.operandPerformance =
@@ -177,12 +185,12 @@ Capabilities makeCapabilities(float perf) {
.whilePerformance = perfInfo};
};
-void update(Capabilities* capabilities, OperandType type, float perf) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+void update(V1_3::Capabilities* capabilities, V1_3::OperandType type, float perf) {
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
::android::nn::update(&capabilities->operandPerformance, type, perfInfo);
}
-float lookupExecTime(const Capabilities& capabilities, OperandType type) {
+float lookupExecTime(const V1_3::Capabilities& capabilities, V1_3::OperandType type) {
return ::android::nn::lookup(capabilities.operandPerformance, type).execTime;
}
@@ -214,16 +222,16 @@ const uint32_t kFirstEncodingHARD_SWISH = kLastEncodingV1_2 + 1;
const uint32_t kFirstEncodingV1_3 = kFirstEncodingHARD_SWISH;
const uint32_t kLastEncodingV1_3 = kFirstEncodingHARD_SWISH;
-const std::map<OperationType, uint32_t> operationToFirstEncoding = {
- {OperationType::ADD, kFirstEncodingADD},
- {OperationType::MUL, kFirstEncodingMUL},
- {OperationType::DIV, kFirstEncodingDIV},
- {OperationType::SUB, kFirstEncodingSUB},
- {OperationType::MAXIMUM, kFirstEncodingMAXIMUM},
- {OperationType::MINIMUM, kFirstEncodingMINIMUM},
- {OperationType::POW, kFirstEncodingPOW},
- {OperationType::PRELU, kFirstEncodingPRELU},
- {OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH},
+const std::map<V1_3::OperationType, uint32_t> operationToFirstEncoding = {
+ {V1_3::OperationType::ADD, kFirstEncodingADD},
+ {V1_3::OperationType::MUL, kFirstEncodingMUL},
+ {V1_3::OperationType::DIV, kFirstEncodingDIV},
+ {V1_3::OperationType::SUB, kFirstEncodingSUB},
+ {V1_3::OperationType::MAXIMUM, kFirstEncodingMAXIMUM},
+ {V1_3::OperationType::MINIMUM, kFirstEncodingMINIMUM},
+ {V1_3::OperationType::POW, kFirstEncodingPOW},
+ {V1_3::OperationType::PRELU, kFirstEncodingPRELU},
+ {V1_3::OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH},
};
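Each operation owns a contiguous block of encodings starting at its kFirstEncoding* value, so a single uint32_t identifies both the operation and its fused activation. Decoding uses the std::greater-keyed map described in the next comment: with descending keys, map::lower_bound(x) yields the first key less than or equal to x. A decoding sketch (assumes the firstEncodingToOperation map declared just below):

auto it = firstEncodingToOperation.lower_bound(encoding);
CHECK(it != firstEncodingToOperation.end());
// it->first is the block's base; encoding - it->first recovers the
// fused activation within the block.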
// Sorted in reverse order (std::greater) so that we can use map::lower_bound to
@@ -244,20 +252,20 @@ const std::map<uint32_t, std::pair<uint32_t, bool>, std::greater<>> firstEncodin
// Look up the operation with the specified index in a graph, and return the
// operation encoding; or, if for some reason this is not one of the encoded
// operations, then return kBadOperation.
-uint32_t lookupOperation(std::function<const Operation&(uint32_t)> getOperation,
- std::function<const Operand&(uint32_t)> getOperand,
+uint32_t lookupOperation(std::function<const V1_3::Operation&(uint32_t)> getOperation,
+ std::function<const V1_3::Operand&(uint32_t)> getOperand,
std::function<const uint8_t*(uint32_t)> getValue,
uint32_t operationIndex) {
- const Operation& operation = getOperation(operationIndex);
+ const V1_3::Operation& operation = getOperation(operationIndex);
switch (operation.type) {
- case OperationType::ADD:
- case OperationType::MUL:
- case OperationType::DIV:
- case OperationType::SUB: {
+ case V1_3::OperationType::ADD:
+ case V1_3::OperationType::MUL:
+ case V1_3::OperationType::DIV:
+ case V1_3::OperationType::SUB: {
// input2 is the fused activation function
- const Operand& input2 = getOperand(operation.inputs[2]);
- if ((input2.type == OperandType::INT32) &&
- (input2.lifetime == OperandLifeTime::CONSTANT_COPY)) {
+ const V1_3::Operand& input2 = getOperand(operation.inputs[2]);
+ if ((input2.type == V1_3::OperandType::INT32) &&
+ (input2.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY)) {
int32_t value;
CHECK_EQ(sizeof(value), input2.location.length);
memcpy(&value, getValue(input2.location.offset), input2.location.length);
@@ -276,11 +284,15 @@ uint32_t lookupOperation(std::function<const Operation&(uint32_t)> getOperation,
return kBadOperation;
}
-uint32_t lookupOperation(const HidlModel& model, const Subgraph& subgraph,
+uint32_t lookupOperation(const HidlModel& model, const V1_3::Subgraph& subgraph,
uint32_t operationIndex) {
return lookupOperation(
- [&subgraph](uint32_t index) -> const Operation& { return subgraph.operations[index]; },
- [&subgraph](uint32_t index) -> const Operand& { return subgraph.operands[index]; },
+ [&subgraph](uint32_t index) -> const V1_3::Operation& {
+ return subgraph.operations[index];
+ },
+ [&subgraph](uint32_t index) -> const V1_3::Operand& {
+ return subgraph.operands[index];
+ },
[&model](uint32_t offset) { return &model.operandValues[offset]; }, operationIndex);
}
@@ -288,12 +300,11 @@ uint32_t lookupOperation(const HidlModel& model, const Subgraph& subgraph,
// This is a debugging utility function.
void dump(const char* name, const ModelBuilder* model) {
const HidlModel hidlModel = model->makeHidlModel();
- std::cout << name << ": " << toString(hidlModel) << std::endl;
- std::cout << "inputs: " << toString(hidlModel.main.inputIndexes) << std::endl;
- std::cout << "outputs: " << toString(hidlModel.main.outputIndexes) << std::endl;
+ std::cout << name << ": " << hidlModel << std::endl;
+ std::cout << "inputs: " << hidlModel.main.inputIndexes << std::endl;
+ std::cout << "outputs: " << hidlModel.main.outputIndexes << std::endl;
for (size_t i = 0, e = hidlModel.main.operations.size(); i < e; i++) {
- std::cout << "operation[" << i << "]: " << toString(hidlModel.main.operations[i])
- << std::endl;
+ std::cout << "operation[" << i << "]: " << hidlModel.main.operations[i] << std::endl;
}
}
#endif
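The dump() hunk above drops toString() in favor of streaming objects directly, which relies on ostream insertion operators for the HAL types now being available from the common headers. A sketch of the shape involved (not the exact declaration):

std::ostream& operator<<(std::ostream& os, const V1_3::Model& model);
std::cout << "model: " << hidlModel << std::endl;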
@@ -313,37 +324,39 @@ class PartitioningDriver : public SampleDriver {
OEMYes, // accepted by getSupportedOperations and prepareModel
};
- PartitioningDriver(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriver(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask, OEM oem = OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: SampleDriver(name),
mVersionString(version),
mCapabilities(capabilities),
mOperationMask(operationMask),
mOEM(oem),
mOperationTypes(std::move(operationTypes)) {
- CHECK_EQ(mOperationTypes.count(OperationType::OEM_OPERATION), size_t(0));
+ CHECK_EQ(mOperationTypes.count(V1_3::OperationType::OEM_OPERATION), size_t(0));
if (operationMask) {
- std::for_each(mOperationTypes.begin(), mOperationTypes.end(), [](OperationType type) {
- CHECK_EQ(operationToFirstEncoding.count(type), size_t(0));
- });
+ std::for_each(mOperationTypes.begin(), mOperationTypes.end(),
+ [](V1_3::OperationType type) {
+ CHECK_EQ(operationToFirstEncoding.count(type), size_t(0));
+ });
}
}
~PartitioningDriver() override {}
- Return<void> getVersionString(getVersionString_cb cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb cb) override {
cb(V1_0::ErrorStatus::NONE, mVersionString);
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& callback) override {
if (mOEM == OEMIndecisive) {
for (const auto& operation : model.main.operations) {
- if (operation.type == OperationType::OEM_OPERATION) {
+ if (operation.type == V1_3::OperationType::OEM_OPERATION) {
callback->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
@@ -354,7 +367,7 @@ class PartitioningDriver : public SampleDriver {
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
auto ret = getSupportedOperations_1_3(
model, [&outStatus](V1_3::ErrorStatus inStatus,
- const hidl_vec<bool>& supportedOperations) {
+ const hardware::hidl_vec<bool>& supportedOperations) {
if (inStatus == V1_3::ErrorStatus::NONE) {
if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
[](bool v) { return v; })) {
@@ -371,57 +384,60 @@ class PartitioningDriver : public SampleDriver {
}
}
- Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override {
+ return V1_0::DeviceStatus::AVAILABLE;
+ }
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, mCapabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
- return Void();
+ return hardware::Void();
}
cb(V1_3::ErrorStatus::NONE, getSupportedOperationsForSubgraph(model, model.main));
- return Void();
+ return hardware::Void();
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1);
- return Void();
+ return hardware::Void();
}
private:
- std::vector<bool> getSupportedOperationsForSubgraph(const Model& model,
- const Subgraph& subgraph) {
+ std::vector<bool> getSupportedOperationsForSubgraph(const V1_3::Model& model,
+ const V1_3::Subgraph& subgraph) {
CHECK(&subgraph == &model.main ||
std::find_if(model.referenced.begin(), model.referenced.end(),
- [&subgraph](const Subgraph& refSubgraph) {
+ [&subgraph](const V1_3::Subgraph& refSubgraph) {
return &subgraph == &refSubgraph;
}) != model.referenced.end());
auto supportsEntireSubgraph = [this, &model, &subgraph](uint32_t refSubgraphOperandIndex) {
CHECK_LT(refSubgraphOperandIndex, subgraph.operands.size());
- const Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex];
- CHECK(refSubgraphOperand.lifetime == OperandLifeTime::SUBGRAPH);
+ const V1_3::Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex];
+ CHECK(refSubgraphOperand.lifetime == V1_3::OperandLifeTime::SUBGRAPH);
CHECK_LT(refSubgraphOperand.location.offset, model.referenced.size());
- const Subgraph& refSubgraph = model.referenced[refSubgraphOperand.location.offset];
+ const V1_3::Subgraph& refSubgraph =
+ model.referenced[refSubgraphOperand.location.offset];
std::vector<bool> supported = getSupportedOperationsForSubgraph(model, refSubgraph);
return std::all_of(supported.begin(), supported.end(), [](bool x) { return x; });
};
const size_t count = subgraph.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = subgraph.operations[i];
+ const V1_3::Operation& operation = subgraph.operations[i];
if (mOperationTypes.count(operation.type)) {
- if (operation.type == OperationType::IF) {
+ if (operation.type == V1_3::OperationType::IF) {
namespace op = android::nn::operation_if;
CHECK_GE(operation.inputs.size(), op::kFirstInput);
supported[i] =
supportsEntireSubgraph(operation.inputs[op::kThenModelOperand]) &&
supportsEntireSubgraph(operation.inputs[op::kElseModelOperand]);
- } else if (operation.type == OperationType::WHILE) {
+ } else if (operation.type == V1_3::OperationType::WHILE) {
namespace op = android::nn::operation_while;
CHECK_GE(operation.inputs.size(), op::kFirstInput);
supported[i] =
@@ -432,7 +448,7 @@ class PartitioningDriver : public SampleDriver {
}
continue;
}
- if (operation.type == OperationType::OEM_OPERATION) {
+ if (operation.type == V1_3::OperationType::OEM_OPERATION) {
supported[i] = (mOEM != OEMNo);
continue;
}
@@ -447,72 +463,75 @@ class PartitioningDriver : public SampleDriver {
}
std::string mVersionString;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
uint32_t mOperationMask;
OEM mOEM;
- std::set<OperationType> mOperationTypes;
+ std::set<V1_3::OperationType> mOperationTypes;
};
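For IF and WHILE, getSupportedOperationsForSubgraph() above claims support only if every operation in each referenced body is also supported. The indirection it follows, spelled out as a sketch (names from the V1_3 HAL; op aliases android::nn::operation_if as in the code above):

// A control-flow body input is a SUBGRAPH operand whose location.offset
// indexes Model::referenced.
const V1_3::Operand& ref = subgraph.operands[operation.inputs[op::kThenModelOperand]];
CHECK(ref.lifetime == V1_3::OperandLifeTime::SUBGRAPH);
const V1_3::Subgraph& body = model.referenced[ref.location.offset];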
// Like PartitioningDriver, but implementing 1.2
class PartitioningDriverV1_2 : public V1_2::IDevice {
public:
- PartitioningDriverV1_2(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_2(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -525,33 +544,33 @@ class PartitioningDriverV1_2 : public V1_2::IDevice {
// Like PartitioningDriver, but implementing 1.1
class PartitioningDriverV1_1 : public V1_1::IDevice {
public:
- PartitioningDriverV1_1(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_1(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -564,25 +583,25 @@ class PartitioningDriverV1_1 : public V1_1::IDevice {
// Like PartitioningDriver, but implementing 1.0
class PartitioningDriverV1_0 : public V1_0::IDevice {
public:
- PartitioningDriverV1_0(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_0(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
@@ -949,7 +968,7 @@ class PartitioningTest : public ::testing::Test {
// From a vector of DeviceSpecification, create a vector of
// Devices.
struct DeviceSpecification {
- DeviceSpecification(const std::string& name, const Capabilities& capabilities,
+ DeviceSpecification(const std::string& name, const V1_3::Capabilities& capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
: mName(name),
@@ -959,30 +978,31 @@ class PartitioningTest : public ::testing::Test {
mOEM(oem) {}
DeviceSpecification(const std::string& name, float perf, uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, perf, perf, operationMask, oem, operationTypes) {}
DeviceSpecification(const std::string& name, float perf, float perfRelaxed,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, kVersionString, perf, perfRelaxed, operationMask, oem,
operationTypes) {}
DeviceSpecification(const std::string& name, const std::string& version, float perf,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, version, perf, perf, operationMask, oem, operationTypes) {}
DeviceSpecification(const std::string& name, const std::string& version, float perf,
float perfRelaxed, uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mName(name),
mVersionString(version),
mOperationMask(operationMask),
mOEM(oem),
mOperationTypes(std::move(operationTypes)) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
- PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed, .powerUsage = perfRelaxed};
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ V1_0::PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed,
+ .powerUsage = perfRelaxed};
mCapabilities = {
.relaxedFloat32toFloat16PerformanceScalar = perfRelaxedInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfRelaxedInfo,
@@ -1004,11 +1024,11 @@ class PartitioningTest : public ::testing::Test {
std::string mName;
std::string mVersionString;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
HalVersion mHalVersion = HalVersion::LATEST;
uint32_t mOperationMask;
PartitioningDriver::OEM mOEM = PartitioningDriver::OEMNo;
- std::set<OperationType> mOperationTypes;
+ std::set<V1_3::OperationType> mOperationTypes;
static constexpr char kVersionString[] = "JUST_AN_EXAMPLE";
@@ -1137,7 +1157,7 @@ class PartitioningTest : public ::testing::Test {
// actual definitions
ASSERT_LT(model->operationCount(), kPseudoDefiningOperationBase);
for (uint32_t i = 0, e = model->operationCount(); i < e; i++) {
- const Operation& operation = model->getOperation(i);
+ const V1_3::Operation& operation = android::nn::convertToV1_3(model->getOperation(i));
for (uint32_t output : operation.outputs) {
(*defMap)[output] = i;
}
@@ -1149,12 +1169,12 @@ class PartitioningTest : public ::testing::Test {
}
// look for NO_VALUE and CONSTANT_COPY
for (uint32_t i = 0, e = model->operandCount(); i < e; i++) {
- const Operand& operand = model->getOperand(i);
+ const V1_3::Operand& operand = android::nn::convertToV1_3(model->getOperand(i));
switch (operand.lifetime) {
- case OperandLifeTime::NO_VALUE:
+ case V1_3::OperandLifeTime::NO_VALUE:
(*defMap)[i] = kPseudoDefiningOperationNoValue;
break;
- case OperandLifeTime::CONSTANT_COPY: {
+ case V1_3::OperandLifeTime::CONSTANT_COPY: {
ASSERT_EQ(operand.location.length, sizeof(uint32_t));
uint32_t value;
memcpy(&value, model->getPointerToOperandValue(operand.location.offset),
@@ -1163,9 +1183,9 @@ class PartitioningTest : public ::testing::Test {
(*defMap)[i] = kPseudoDefiningOperationConstantCopy0 + value;
break;
}
- case OperandLifeTime::TEMPORARY_VARIABLE:
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case V1_3::OperandLifeTime::TEMPORARY_VARIABLE:
+ case V1_3::OperandLifeTime::SUBGRAPH_INPUT:
+ case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT:
// already handled
break;
default:
@@ -1207,7 +1227,6 @@ class PartitioningTest : public ::testing::Test {
bool compare(const Operand& operandA, const Operand& operandB) {
if (operandA.type != operandB.type || operandA.dimensions != operandB.dimensions ||
- operandA.numberOfConsumers != operandB.numberOfConsumers ||
operandA.scale != operandB.scale || operandA.zeroPoint != operandB.zeroPoint) {
return false;
}
@@ -2021,8 +2040,8 @@ TEST_F(PartitioningTest, Perf) {
// WrapperOperandType is the NeuralNetworksWrapper.h representation of a
// full operand type (WrapperType plus dimensions plus other attributes).
- auto TestType = [](OperandType operandType) {
- if (operandType == OperandType::SUBGRAPH) {
+ auto TestType = [](V1_3::OperandType operandType) {
+ if (operandType == V1_3::OperandType::SUBGRAPH) {
// SUBGRAPH capabilities are handled differently.
return;
}
@@ -2037,11 +2056,11 @@ TEST_F(PartitioningTest, Perf) {
model.finish();
ASSERT_TRUE(model.isValid());
- const Capabilities baseCapabilities = makeCapabilities(0.5);
+ const V1_3::Capabilities baseCapabilities = makeCapabilities(0.5);
{
// better than base
- Capabilities goodCapabilities = baseCapabilities;
+ V1_3::Capabilities goodCapabilities = baseCapabilities;
update(&goodCapabilities, operandType, 0.25);
const auto devices =
@@ -2062,7 +2081,7 @@ TEST_F(PartitioningTest, Perf) {
{
// worse than base
- Capabilities badCapabilities = baseCapabilities;
+ V1_3::Capabilities badCapabilities = baseCapabilities;
update(&badCapabilities, operandType, 0.75);
const auto devices =
makeDevices({{"base", baseCapabilities, ~0U, PartitioningDriver::OEMYes},
@@ -2081,13 +2100,13 @@ TEST_F(PartitioningTest, Perf) {
}
};
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- TestType(static_cast<OperandType>(type));
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ TestType(static_cast<V1_3::OperandType>(type));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- TestType(static_cast<OperandType>(type));
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ TestType(static_cast<V1_3::OperandType>(type));
}
}
@@ -2167,8 +2186,9 @@ void DynamicTemporariesTest::compileModelAndComparePlan() {
ASSERT_TRUE(mModel.has_value());
ASSERT_TRUE(!mCompilation.has_value());
- auto devices = makeDevices({{"fill", 0.9, 0U, PartitioningDriver::OEMNo, {OperationType::FILL}},
- {"add", 0.9, 0U, PartitioningDriver::OEMNo, {OperationType::ADD}}});
+ auto devices =
+ makeDevices({{"fill", 0.9, 0U, PartitioningDriver::OEMNo, {V1_3::OperationType::FILL}},
+ {"add", 0.9, 0U, PartitioningDriver::OEMNo, {V1_3::OperationType::ADD}}});
mCompilation = PartitioningCompilation(&mModel.value(), devices);
ASSERT_EQ(mCompilation->setPartitioning(DeviceManager::kPartitioningWithoutFallback),
@@ -2824,44 +2844,44 @@ class PerfTest : public ::testing::Test {};
TEST_F(PerfTest, Lookup) {
// Derive an arbitrary (but reproducible) performance value from an OperandType.
// We'll use this to ensure that we can save and then recover a type's performance.
- auto typePerf = [](OperandType type) { return float(static_cast<uint32_t>(type)); };
+ auto typePerf = [](V1_3::OperandType type) { return float(static_cast<uint32_t>(type)); };
- Capabilities capabilities = makeCapabilities(-1.0f);
+ V1_3::Capabilities capabilities = makeCapabilities(-1.0f);
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
update(&capabilities, operandType, typePerf(operandType));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
update(&capabilities, operandType, typePerf(operandType));
}
// Make sure lookup retrieves the values stored by update
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
- if (operandType == OperandType::SUBGRAPH) {
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
+ if (operandType == V1_3::OperandType::SUBGRAPH) {
// SUBGRAPH capabilities are handled differently.
continue;
}
SCOPED_TRACE(toString(operandType));
EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
SCOPED_TRACE(toString(operandType));
EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType));
}
// Check the behavior of a missing type
- OperandType operandType =
- static_cast<OperandType>(static_cast<uint32_t>(OperandTypeRange::BASE_MAX) + 1);
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(
+ static_cast<uint32_t>(V1_3::OperandTypeRange::BASE_MAX) + 1);
EXPECT_EQ(lookupExecTime(capabilities, operandType), FLT_MAX);
}
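The Lookup test depends on two properties of the capabilities helpers: update() overwrites the per-type entry in place, and looking up a type outside the stored table falls back to FLT_MAX. In miniature, using the helpers defined earlier in this file:

V1_3::Capabilities caps = makeCapabilities(-1.0f);
update(&caps, V1_3::OperandType::TENSOR_FLOAT32, 2.0f);
EXPECT_EQ(lookupExecTime(caps, V1_3::OperandType::TENSOR_FLOAT32), 2.0f);
// A type past OperandTypeRange::BASE_MAX reads back as FLT_MAX.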
@@ -3005,7 +3025,7 @@ TEST_F(ControlFlowPartitioningTest, IF_SimplePlan) {
// The device supports all operations.
const auto devices =
- makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}});
+ makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {V1_3::OperationType::IF}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3023,7 +3043,7 @@ TEST_F(ControlFlowPartitioningTest, WHILE_SimplePlan) {
0.9,
~0U,
PartitioningDriver::OEMNo,
- {OperationType::WHILE, OperationType::EQUAL}}});
+ {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3047,7 +3067,7 @@ void ControlFlowPartitioningTest::testIfUnknownSize(Dimensioned dimensionedMain,
// The device supports all operations but the partitioner ignores its IF
// support due to http://b/159076604#comment5.
const auto devices =
- makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}});
+ makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {V1_3::OperationType::IF}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3090,7 +3110,7 @@ void ControlFlowPartitioningTest::testWhileUnknownSize(Dimensioned dimensionedMa
0.9,
~0U,
PartitioningDriver::OEMNo,
- {OperationType::WHILE, OperationType::EQUAL}}});
+ {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
diff --git a/nn/runtime/test/TestPartitioningRandom.cpp b/nn/runtime/test/TestPartitioningRandom.cpp
index 51d7910cc..294d93ad5 100644
--- a/nn/runtime/test/TestPartitioningRandom.cpp
+++ b/nn/runtime/test/TestPartitioningRandom.cpp
@@ -95,11 +95,15 @@
namespace android {
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
-using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
+using Device = nn::Device;
using ExecutionPlan = nn::ExecutionPlan;
+using HalCacheToken = nn::HalCacheToken;
using HalVersion = nn::HalVersion;
using HidlModel = V1_3::Model;
using ModelBuilder = nn::ModelBuilder;
@@ -335,7 +339,7 @@ class RandomPartitioningTest : public ::testing::TestWithParam<unsigned> {
public:
RandomPartitioningTest() : mRandNumEng(GetParam() /* seed */), mRandNumUnitDist(0.0, 1.0) {}
- static Signature getSignature(const HidlModel& model, const Operation& operation);
+ static Signature getSignature(const HidlModel& model, const V1_3::Operation& operation);
protected:
static V1_0::IDevice* makeTestDriver(HalVersion version, const char* name,
@@ -500,7 +504,8 @@ HalVersion RandomPartitioningTest::getMinHalVersion(ANeuralNetworksOperationType
return kOperationToVersion.at(type);
}
-Signature RandomPartitioningTest::getSignature(const HidlModel& model, const Operation& operation) {
+Signature RandomPartitioningTest::getSignature(const HidlModel& model,
+ const V1_3::Operation& operation) {
static const auto kOperationToActivation = [] {
std::map<ANeuralNetworksOperationType, int> result;
for (const auto& pattern : kOperationPatterns) {
@@ -516,9 +521,10 @@ Signature RandomPartitioningTest::getSignature(const HidlModel& model, const Ope
return Signature(operationType, -1);
}
- const Operand& operand = model.main.operands[operation.inputs[activationFunctionInputIndex]];
- CHECK(operand.lifetime == OperandLifeTime::CONSTANT_COPY);
- CHECK(operand.type == OperandType::INT32);
+ const V1_3::Operand& operand =
+ model.main.operands[operation.inputs[activationFunctionInputIndex]];
+ CHECK(operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY);
+ CHECK(operand.type == V1_3::OperandType::INT32);
int32_t value;
memcpy(&value, &model.operandValues[operand.location.offset], operand.location.length);
return Signature(operationType, value);
@@ -546,21 +552,21 @@ class TestDriver : public SampleDriver {
TestDriver(const char* name, std::set<Signature> signatures)
: SampleDriver(name), mSignatures(std::move(signatures)) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
@@ -572,19 +578,20 @@ class TestDriver : public SampleDriver {
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& callback) override {
// NOTE: We verify that all operations in the model are supported.
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
auto ret = getSupportedOperations_1_3(
model, [&outStatus](V1_3::ErrorStatus inStatus,
- const hidl_vec<bool>& supportedOperations) {
+ const hardware::hidl_vec<bool>& supportedOperations) {
if (inStatus == V1_3::ErrorStatus::NONE) {
if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
[](bool v) { return v; })) {
@@ -610,57 +617,60 @@ class TestDriverV1_2 : public V1_2::IDevice {
public:
TestDriverV1_2(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -675,27 +685,27 @@ class TestDriverV1_1 : public V1_1::IDevice {
public:
TestDriverV1_1(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -710,19 +720,19 @@ class TestDriverV1_0 : public V1_0::IDevice {
public:
TestDriverV1_0(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
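
The TestDriverV1_0/V1_1/V1_2 classes above all follow the same adapter shape: each older-interface driver owns the newest driver and forwards every method the two interfaces share. A plain-C++ sketch of that shape, with hypothetical names in place of the HIDL interfaces:

#include <memory>
#include <utility>

struct DriverV1_3 {                      // stand-in for the sp<V1_3::IDevice> target
    int getStatus() const { return 0; }  // hypothetical payload method
};

class DriverV1_0 {                       // exposes only the old surface
  public:
    explicit DriverV1_0(std::shared_ptr<DriverV1_3> latest)
        : mLatestDriver(std::move(latest)) {}
    // Every shared method is a one-line forward to the latest driver.
    int getStatus() const { return mLatestDriver->getStatus(); }
  private:
    const std::shared_ptr<DriverV1_3> mLatestDriver;
};
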
diff --git a/nn/runtime/test/TestRemoveDefaultArguments.cpp b/nn/runtime/test/TestRemoveDefaultArguments.cpp
index 8726adc85..daef6bf60 100644
--- a/nn/runtime/test/TestRemoveDefaultArguments.cpp
+++ b/nn/runtime/test/TestRemoveDefaultArguments.cpp
@@ -98,7 +98,6 @@ const test_helper::TestModel& get_test_model_align_corners_2x2_to_1x1();
namespace android::nn {
namespace {
-using namespace hal;
using sample_driver::SampleDriverPartial;
using Result = test_wrapper::Result;
using WrapperOperandType = test_wrapper::OperandType;
@@ -113,18 +112,18 @@ class TestDriver : public SampleDriverPartial {
public:
TestDriver() : SampleDriverPartial(kTestDriverName) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
- return Void();
+ return hardware::Void();
}
void setSupportedInputCount(uint32_t count) { mSupportedInputCount = count; }
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override {
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override {
std::vector<bool> supported(model.main.operations.size());
std::transform(model.main.operations.begin(), model.main.operations.end(),
- supported.begin(), [this](const Operation& operation) {
+ supported.begin(), [this](const V1_3::Operation& operation) {
SCOPED_TRACE("operation = " + toString(operation.type));
EXPECT_EQ(operation.inputs.size(), mSupportedInputCount);
return operation.inputs.size() == mSupportedInputCount;
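
getSupportedOperationsImpl above is a straight std::transform from operations to per-operation booleans. A self-contained sketch of that predicate mapping, using a hypothetical minimal Operation with only the field the predicate consults:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Operation {
    std::vector<uint32_t> inputs;
};

std::vector<bool> getSupportedOperations(const std::vector<Operation>& ops,
                                         uint32_t supportedInputCount) {
    std::vector<bool> supported(ops.size());
    // One flag per operation, in the same order as the model lists them.
    std::transform(ops.begin(), ops.end(), supported.begin(),
                   [supportedInputCount](const Operation& op) {
                       return op.inputs.size() == supportedInputCount;
                   });
    return supported;
}
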
diff --git a/nn/runtime/test/TestUnspecifiedDimensions.cpp b/nn/runtime/test/TestUnspecifiedDimensions.cpp
index c1bad04a8..5a2287c78 100644
--- a/nn/runtime/test/TestUnspecifiedDimensions.cpp
+++ b/nn/runtime/test/TestUnspecifiedDimensions.cpp
@@ -17,7 +17,10 @@
#include "TestNeuralNetworksWrapper.h"
#include <sys/mman.h>
+#include <memory>
+#include <string>
#include <tuple>
+#include <utility>
#include <vector>
#include <android-base/macros.h>
diff --git a/nn/runtime/test/TestVersionedInterfaces.cpp b/nn/runtime/test/TestVersionedInterfaces.cpp
index 6d1306d57..b4f32bcde 100644
--- a/nn/runtime/test/TestVersionedInterfaces.cpp
+++ b/nn/runtime/test/TestVersionedInterfaces.cpp
@@ -22,6 +22,7 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <hidl/Status.h>
+#include <nnapi/TypeUtils.h>
#include <utils/Errors.h>
#include <limits>
@@ -37,7 +38,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using testing::_;
using testing::Invoke;
using testing::InvokeWithoutArgs;
@@ -45,40 +45,59 @@ using testing::MockFunction;
using MockDeviceFactory = MockFunction<sp<V1_0::IDevice>(bool blocking)>;
constexpr uint32_t kNoCacheFilesNeeded = 0;
-constexpr uint32_t kMaxNumberOfCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
-constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
- .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming12 = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
+ .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+constexpr Timing kNoTiming = {};
template <typename... Args>
auto makeCallbackReturn(Args&&... args) {
return [argPack = std::make_tuple(std::forward<Args>(args)...)](const auto& cb) {
std::apply(cb, argPack);
- return Void();
+ return hardware::Void();
};
};
-class MockDevice : public IDevice {
+class MockDevice : public V1_3::IDevice {
public:
static sp<MockDevice> create() {
const sp<MockDevice> mockDevice = new MockDevice();
- const auto linkToDeathRet_ret = []() -> Return<bool> { return true; };
- const auto getCapabilities_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_0::Capabilities{});
+ const auto linkToDeathRet_ret = []() -> hardware::Return<bool> { return true; };
+ const auto getCapabilities_ret = makeCallbackReturn(
+ V1_0::ErrorStatus::NONE, V1_0::Capabilities{
+ .float32Performance = kNoPerformanceInfo,
+ .quantized8Performance = kNoPerformanceInfo,
+ });
const auto getCapabilities_1_1_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_1::Capabilities{});
+ makeCallbackReturn(V1_0::ErrorStatus::NONE,
+ V1_1::Capabilities{
+ .float32Performance = kNoPerformanceInfo,
+ .quantized8Performance = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16Performance = kNoPerformanceInfo,
+ });
const auto getVersionString_ret =
makeCallbackReturn(V1_0::ErrorStatus::NONE, "Google-MockV1");
- const auto getType_ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, DeviceType::OTHER);
- const auto getCapabilities_1_2_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::Capabilities{});
+ const auto getType_ret =
+ makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::DeviceType::OTHER);
+ const auto getCapabilities_1_2_ret = makeCallbackReturn(
+ V1_0::ErrorStatus::NONE,
+ V1_2::Capabilities{
+ .relaxedFloat32toFloat16PerformanceScalar = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = kNoPerformanceInfo,
+ });
const auto getSupportedExtensions_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, hidl_vec<Extension>{});
+ makeCallbackReturn(V1_0::ErrorStatus::NONE, hardware::hidl_vec<V1_2::Extension>{});
const auto getNumberOfCacheFilesNeeded_ret = makeCallbackReturn(
V1_0::ErrorStatus::NONE, kMaxNumberOfCacheFiles, kMaxNumberOfCacheFiles);
- const auto getCapabilities_1_3_ret =
- makeCallbackReturn(V1_3::ErrorStatus::NONE, V1_3::Capabilities{});
+ const auto getCapabilities_1_3_ret = makeCallbackReturn(
+ V1_3::ErrorStatus::NONE,
+ V1_3::Capabilities{
+ .relaxedFloat32toFloat16PerformanceScalar = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = kNoPerformanceInfo,
+ .ifPerformance = kNoPerformanceInfo,
+ .whilePerformance = kNoPerformanceInfo,
+ });
ON_CALL(*mockDevice, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret));
ON_CALL(*mockDevice, getCapabilities(_)).WillByDefault(Invoke(getCapabilities_ret));
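
makeCallbackReturn above packs an arbitrary argument list into a tuple and replays it into whatever callback the mocked HIDL method supplies. The same helper with the hardware::Void() return dropped for brevity:

#include <tuple>
#include <utility>

// Captures args... by value in a tuple; std::apply unpacks them into the
// callback each time the returned lambda is invoked.
template <typename... Args>
auto makeCallbackReturn(Args&&... args) {
    return [argPack = std::make_tuple(std::forward<Args>(args)...)](const auto& cb) {
        std::apply(cb, argPack);
    };
}

// e.g. makeCallbackReturn(V1_0::ErrorStatus::NONE, "Google-MockV1") yields a
// lambda that calls cb(V1_0::ErrorStatus::NONE, "Google-MockV1").
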
@@ -108,73 +127,82 @@ class MockDevice : public IDevice {
}
// IBase methods below.
- Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient,
- uint64_t /*cookie*/) override {
+ hardware::Return<bool> linkToDeath(const sp<hardware::hidl_death_recipient>& recipient,
+ uint64_t /*cookie*/) override {
mDeathRecipient = recipient;
return linkToDeathRet();
}
- MOCK_METHOD(Return<void>, ping, (), (override));
+ MOCK_METHOD(hardware::Return<void>, ping, (), (override));
// V1_0 methods below.
- MOCK_METHOD(Return<void>, getCapabilities, (getCapabilities_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities, (getCapabilities_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations,
(const V1_0::Model& model, getSupportedOperations_cb cb), (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel,
(const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<DeviceStatus>, getStatus, (), (override));
+ MOCK_METHOD(hardware::Return<V1_0::DeviceStatus>, getStatus, (), (override));
// V1_1 methods below.
- MOCK_METHOD(Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_1,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_1,
(const V1_1::Model& model, getSupportedOperations_1_1_cb cb), (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_1,
- (const V1_1::Model& model, ExecutionPreference preference,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel_1_1,
+ (const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& callback),
(override));
// V1_2 methods below.
- MOCK_METHOD(Return<void>, getVersionString, (getVersionString_cb cb), (override));
- MOCK_METHOD(Return<void>, getType, (getType_cb cb), (override));
- MOCK_METHOD(Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_2,
- (const V1_2::Model& model, getSupportedOperations_1_2_cb cb), (override));
- MOCK_METHOD(Return<void>, getNumberOfCacheFilesNeeded, (getNumberOfCacheFilesNeeded_cb cb),
+ MOCK_METHOD(hardware::Return<void>, getVersionString, (getVersionString_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getType, (getType_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb),
(override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_2,
- (const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_2,
+ (const V1_2::Model& model, getSupportedOperations_1_2_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getNumberOfCacheFilesNeeded,
+ (getNumberOfCacheFilesNeeded_cb cb), (override));
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel_1_2,
+ (const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModelFromCache,
- (const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModelFromCache,
+ (const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
(override));
// V1_3 methods below.
- MOCK_METHOD(Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_3,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_3,
(const V1_3::Model& model, getSupportedOperations_1_3_cb cb), (override));
- MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModel_1_3,
- (const V1_3::Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<V1_3::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, prepareModel_1_3,
+ (const V1_3::Model& model, V1_1::ExecutionPreference preference,
+ V1_3::Priority priority, const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3,
- (const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<V1_3::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3,
+ (const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, allocate,
- (const BufferDesc& desc, const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles,
- allocate_cb cb),
+ MOCK_METHOD(hardware::Return<void>, allocate,
+ (const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb),
(override));
// Helper methods.
- MOCK_METHOD(Return<bool>, linkToDeathRet, ());
+ MOCK_METHOD(hardware::Return<bool>, linkToDeathRet, ());
void simulateCrash() {
ASSERT_NE(nullptr, mDeathRecipient.get());
@@ -189,15 +217,15 @@ class MockDevice : public IDevice {
private:
// Members.
- sp<hidl_death_recipient> mDeathRecipient;
+ sp<hardware::hidl_death_recipient> mDeathRecipient;
};
-class MockPreparedModel : public IPreparedModel {
+class MockPreparedModel : public V1_3::IPreparedModel {
public:
static sp<MockPreparedModel> create() {
const sp<MockPreparedModel> mockPreparedModel = new MockPreparedModel();
- const auto linkToDeathRet_ret = []() -> Return<bool> { return true; };
+ const auto linkToDeathRet_ret = []() -> hardware::Return<bool> { return true; };
ON_CALL(*mockPreparedModel, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret));
// These EXPECT_CALL(...).Times(testing::AnyNumber()) calls are to
@@ -208,27 +236,28 @@ class MockPreparedModel : public IPreparedModel {
}
// IBase methods below.
- Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient,
- uint64_t /*cookie*/) override {
+ hardware::Return<bool> linkToDeath(const sp<hardware::hidl_death_recipient>& recipient,
+ uint64_t /*cookie*/) override {
mDeathRecipient = recipient;
return linkToDeathRet();
}
- MOCK_METHOD(Return<void>, ping, (), (override));
+ MOCK_METHOD(hardware::Return<void>, ping, (), (override));
// V1_0 methods below.
- MOCK_METHOD(Return<V1_0::ErrorStatus>, execute,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, execute,
(const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback),
(override));
// V1_2 methods below.
- MOCK_METHOD(Return<V1_0::ErrorStatus>, execute_1_2,
- (const V1_0::Request& request, MeasureTiming measure,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, execute_1_2,
+ (const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, executeSynchronously,
- (const V1_0::Request& request, MeasureTiming measure, executeSynchronously_cb cb),
+ MOCK_METHOD(hardware::Return<void>, executeSynchronously,
+ (const V1_0::Request& request, V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb),
(override));
- MOCK_METHOD(Return<void>, configureExecutionBurst,
+ MOCK_METHOD(hardware::Return<void>, configureExecutionBurst,
(const sp<V1_2::IBurstCallback>& callback,
const hardware::MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -236,27 +265,28 @@ class MockPreparedModel : public IPreparedModel {
(override));
// V1_3 methods below.
- MOCK_METHOD(Return<ErrorStatus>, execute_1_3,
- (const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<IExecutionCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, execute_1_3,
+ (const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, executeSynchronously_1_3,
- (const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+ MOCK_METHOD(hardware::Return<void>, executeSynchronously_1_3,
+ (const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
executeSynchronously_1_3_cb cb),
(override));
- MOCK_METHOD(Return<void>, executeFenced,
- (const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration, executeFenced_cb cb),
+ MOCK_METHOD(hardware::Return<void>, executeFenced,
+ (const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb),
(override));
// Helper methods.
- MOCK_METHOD(Return<bool>, linkToDeathRet, ());
+ MOCK_METHOD(hardware::Return<bool>, linkToDeathRet, ());
void simulateCrash() {
ASSERT_NE(nullptr, mDeathRecipient.get());
@@ -271,27 +301,29 @@ class MockPreparedModel : public IPreparedModel {
private:
// Members.
- sp<hidl_death_recipient> mDeathRecipient;
+ sp<hardware::hidl_death_recipient> mDeathRecipient;
};
class MockBurstContext : public V1_2::IBurstContext {
public:
// V1_2 methods below.
- MOCK_METHOD(Return<void>, freeMemory, (int32_t slot), (override));
+ MOCK_METHOD(hardware::Return<void>, freeMemory, (int32_t slot), (override));
};
-class MockFencedExecutionCallback : public IFencedExecutionCallback {
+class MockFencedExecutionCallback : public V1_3::IFencedExecutionCallback {
public:
// V1_3 methods below.
- MOCK_METHOD(Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override));
};
-class MockBuffer : public IBuffer {
+class MockBuffer : public V1_3::IBuffer {
public:
// V1_3 methods below.
- MOCK_METHOD(Return<ErrorStatus>, copyTo, (const hidl_memory& dst), (override));
- MOCK_METHOD(Return<ErrorStatus>, copyFrom,
- (const hidl_memory& src, const hidl_vec<uint32_t>& dimensions), (override));
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, copyTo, (const hardware::hidl_memory& dst),
+ (override));
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, copyFrom,
+ (const hardware::hidl_memory& src, const hardware::hidl_vec<uint32_t>& dimensions),
+ (override));
};
enum class Version { V1_0, V1_1, V1_2, V1_3, MOCK };
@@ -315,18 +347,19 @@ sp<V1_0::IDevice> adaptAs(const sp<MockDevice>& mockDevice, Version version) {
auto makePreparedModelReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
- return [launchStatus, returnStatus, preparedModel](
- const V1_0::Model& /*model*/,
- const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ return [launchStatus, returnStatus, preparedModel](const V1_0::Model& /*model*/,
+ const sp<V1_0::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus, preparedModel).isOk();
return launchStatus;
};
}
auto makePreparedModel_1_1Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
- return [launchStatus, returnStatus, preparedModel](
- const V1_1::Model& /*model*/, ExecutionPreference /*preference*/,
- const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ return [launchStatus, returnStatus, preparedModel](const V1_1::Model& /*model*/,
+ V1_1::ExecutionPreference /*preference*/,
+ const sp<V1_0::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus, preparedModel).isOk();
return launchStatus;
};
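
The makePreparedModel*Return factories above all encode the same two-level status split: the callback's notify carries the asynchronous return status, while the lambda's own return value is the synchronous launch status the transport reports. A reduced sketch of that split, with stand-in types instead of the HIDL ones:

#include <functional>
#include <memory>

enum class ErrorStatus { NONE, GENERAL_FAILURE };  // stand-in
struct PreparedModel {};                           // stand-in

auto makePrepareReturn(ErrorStatus launchStatus, ErrorStatus returnStatus,
                       std::shared_ptr<PreparedModel> preparedModel) {
    return [launchStatus, returnStatus, preparedModel](
                   const std::function<void(ErrorStatus,
                                            std::shared_ptr<PreparedModel>)>& cb) {
        cb(returnStatus, preparedModel);  // asynchronous notify(...)
        return launchStatus;              // synchronous launch status
    };
}
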
@@ -334,9 +367,10 @@ auto makePreparedModel_1_1Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStat
auto makePreparedModel_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
- const V1_2::Model& /*model*/, ExecutionPreference /*preference*/,
+ const V1_2::Model& /*model*/, V1_1::ExecutionPreference /*preference*/,
const auto& /*modelCache*/, const auto& /*dataCache*/, const auto& /*token*/,
- const sp<V1_2::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const sp<V1_2::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify_1_2(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -344,11 +378,12 @@ auto makePreparedModel_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStat
auto makePreparedModel_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
- const V1_3::Model& /*model*/, ExecutionPreference /*preference*/,
- Priority /*priority*/, const OptionalTimePoint& /*deadline*/,
- const hidl_vec<hidl_handle>& /*modelCache*/,
- const hidl_vec<hidl_handle>& /*dataCache*/, const CacheToken& /*token*/,
- const sp<V1_3::IPreparedModelCallback>& cb) -> Return<V1_3::ErrorStatus> {
+ const V1_3::Model& /*model*/, V1_1::ExecutionPreference /*preference*/,
+ V1_3::Priority /*priority*/, const V1_3::OptionalTimePoint& /*deadline*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*modelCache*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*dataCache*/,
+ const HalCacheToken& /*token*/, const sp<V1_3::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_3::ErrorStatus> {
cb->notify_1_3(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -357,51 +392,53 @@ auto makePreparedModel_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStat
auto makeExecuteReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus) {
return [launchStatus, returnStatus](
const V1_0::Request& /*request*/,
- const sp<V1_0::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const sp<V1_0::IExecutionCallback>& cb) -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus);
return launchStatus;
};
}
auto makeExecute_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
- const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [launchStatus, returnStatus, outputShapes, timing](
- const V1_0::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const sp<V1_2::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const V1_0::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const sp<V1_2::IExecutionCallback>& cb) -> hardware::Return<V1_0::ErrorStatus> {
cb->notify_1_2(returnStatus, outputShapes, timing);
return launchStatus;
};
}
auto makeExecute_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus,
- const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [launchStatus, returnStatus, outputShapes, timing](
- const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
- const sp<V1_3::IExecutionCallback>& cb) -> Return<V1_3::ErrorStatus> {
+ const V1_3::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const sp<V1_3::IExecutionCallback>& cb) -> hardware::Return<V1_3::ErrorStatus> {
cb->notify_1_3(returnStatus, outputShapes, timing);
return launchStatus;
};
}
auto makeExecuteSynchronouslyReturn(V1_0::ErrorStatus status,
- const std::vector<OutputShape>& outputShapes,
- const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [status, outputShapes, timing](const V1_0::Request& /*request*/,
- MeasureTiming /*measureTiming*/,
+ V1_2::MeasureTiming /*measureTiming*/,
const V1_2::IPreparedModel::executeSynchronously_cb& cb) {
cb(status, outputShapes, timing);
- return Void();
+ return hardware::Void();
};
}
auto makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus status,
- const std::vector<OutputShape>& outputShapes,
- const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [status, outputShapes, timing](
- const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const V1_3::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
const V1_3::IPreparedModel::executeSynchronously_1_3_cb& cb) {
cb(status, outputShapes, timing);
- return Void();
+ return hardware::Void();
};
}
auto makeConfigureExecutionBurst(V1_0::ErrorStatus status,
@@ -412,19 +449,20 @@ auto makeConfigureExecutionBurst(V1_0::ErrorStatus status,
const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/,
V1_2::IPreparedModel::configureExecutionBurst_cb cb) {
cb(status, burstContext);
- return Void();
+ return hardware::Void();
};
}
-auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hidl_handle& syncFence,
- const sp<IFencedExecutionCallback>& dispatchCallback) {
+auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hardware::hidl_handle& syncFence,
+ const sp<V1_3::IFencedExecutionCallback>& dispatchCallback) {
return [status, syncFence, dispatchCallback](
- const V1_3::Request& /*request*/, const hidl_vec<hidl_handle>& /*waitFor*/,
- MeasureTiming /*measure*/, const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
- const OptionalTimeoutDuration& /*duration*/,
+ const V1_3::Request& /*request*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*waitFor*/,
+ V1_2::MeasureTiming /*measure*/, const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const V1_3::OptionalTimeoutDuration& /*duration*/,
V1_3::IPreparedModel::executeFenced_cb cb) {
cb(status, syncFence, dispatchCallback);
- return Void();
+ return hardware::Void();
};
}
@@ -516,7 +554,7 @@ std::shared_ptr<VersionedIDevice> makeVersionedIDeviceFrom(const sp<MockDevice>&
const auto device = adaptAs(mockDevice, version);
ON_CALL(*mockDeviceFactory, Call(_)).WillByDefault(testing::Return(device));
EXPECT_CALL(*mockDeviceFactory, Call(/*blocking=*/true)).Times(testing::AtLeast(1));
- const DeviceFactory makeDevice = mockDeviceFactory->AsStdFunction();
+ const HalDeviceFactory makeDevice = mockDeviceFactory->AsStdFunction();
return VersionedIDevice::create("MockDevice", makeDevice);
}
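
makeVersionedIDeviceFrom above adapts a gmock MockFunction into the std::function-based factory the runtime consumes, which lets the tests assert how often and with what arguments the factory is invoked. A standalone sketch of that wiring, with a hypothetical int standing in for the device:

#include <functional>
#include <gmock/gmock.h>

using Device = int;                                 // hypothetical device handle
using DeviceFactory = std::function<Device(bool)>;  // shape of HalDeviceFactory

void wireFactory(testing::MockFunction<Device(bool)>& mockFactory) {
    ON_CALL(mockFactory, Call(testing::_)).WillByDefault(testing::Return(Device{7}));
    EXPECT_CALL(mockFactory, Call(/*blocking=*/true)).Times(testing::AtLeast(1));
    const DeviceFactory makeDevice = mockFactory.AsStdFunction();
    makeDevice(/*blocking=*/true);  // satisfies the expectation above
}
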
@@ -566,7 +604,7 @@ class VersionedIDeviceMockTest : public VersionedIDeviceInitializedTest<Version:
TEST_F(VersionedIDeviceInitializationTest, creationFailure) {
// setup failure
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(nullptr));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -581,7 +619,7 @@ TEST_F(VersionedIDeviceInitializationTest, linkToDeathTransportFailure) {
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -592,10 +630,10 @@ TEST_F(VersionedIDeviceInitializationTest, linkToDeathTransportFailure) {
TEST_F(VersionedIDeviceInitializationTest, linkToDeathReturnError) {
// setup failure
- const auto ret = []() -> Return<bool> { return false; };
+ const auto ret = []() -> hardware::Return<bool> { return false; };
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice));
EXPECT_CALL(*kMockDevice, linkToDeathRet()).Times(1).WillOnce(InvokeWithoutArgs(ret));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -666,7 +704,8 @@ TEST_F(VersionedIDeviceInitializationTest, getVersionStringFailure) {
TEST_F(VersionedIDeviceInitializationTest, getTypeFailure) {
// setup failure
- const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, DeviceType::OTHER);
+ const auto ret =
+ makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_2::DeviceType::OTHER);
EXPECT_CALL(*kMockDevice, getType(_)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -678,7 +717,8 @@ TEST_F(VersionedIDeviceInitializationTest, getTypeFailure) {
TEST_F(VersionedIDeviceInitializationTest, getSupportedExtensionsFailure) {
// setup failure
- const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, hidl_vec<Extension>{});
+ const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE,
+ hardware::hidl_vec<V1_2::Extension>{});
EXPECT_CALL(*kMockDevice, getSupportedExtensions(_)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -839,9 +879,11 @@ TEST_F(VersionedIDeviceV1_0Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_LT(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_LT(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -851,9 +893,11 @@ TEST_F(VersionedIDeviceV1_1Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_LT(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_LT(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -863,9 +907,11 @@ TEST_F(VersionedIDeviceV1_2Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_EQ(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_EQ(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -875,9 +921,11 @@ TEST_F(VersionedIDeviceV1_3Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_EQ(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_EQ(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
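
The four getCapabilities tests above track two canonical-type changes: PerformanceInfo is now nested as Capabilities::PerformanceInfo, and the operand-performance table is no longer a bare vector, so its size is reached through asVector(). A guess at the minimal shape that behavior implies (an assumption for illustration, not the real canonical API):

#include <vector>

struct PerformanceInfo {
    float execTime;
    float powerUsage;
};

// Assumed wrapper table: entries are reachable only through an accessor,
// hence the tests' switch from .size() to .asVector().size().
struct OperandPerformanceTable {
    std::vector<PerformanceInfo> entries;
    const std::vector<PerformanceInfo>& asVector() const { return entries; }
};
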
@@ -1107,16 +1155,16 @@ TEST_F(VersionedIDeviceV1_0Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1124,16 +1172,16 @@ TEST_F(VersionedIDeviceV1_1Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1141,16 +1189,16 @@ TEST_F(VersionedIDeviceV1_2Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1158,16 +1206,16 @@ TEST_F(VersionedIDeviceV1_3Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_3::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
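
Each getSupportedOperations test above now builds its MetaModel from an explicit canonical Model{} and unpacks the device's (status, flags) result with a structured binding. A minimal sketch of that call shape, with stub types in place of the runtime's:

#include <utility>
#include <vector>

enum class ErrorStatus { NONE, GENERAL_FAILURE };  // stand-in for canonical status

// Stub with the same return shape as VersionedIDevice::getSupportedOperations.
std::pair<ErrorStatus, std::vector<bool>> getSupportedOperationsStub() {
    return {ErrorStatus::NONE, {}};
}

void check() {
    const auto [resultCode, supportedOperations] = getSupportedOperationsStub();
    (void)resultCode;          // tests compare against ErrorStatus::NONE
    (void)supportedOperations; // tests check its size
}
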
@@ -1179,7 +1227,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModel) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1195,7 +1243,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModel) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1211,7 +1259,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModel) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1229,7 +1277,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModel) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1271,13 +1319,14 @@ TEST_F(VersionedIDeviceV1_3Test, allocate) {
// setup call
const sp<MockBuffer> mockBuffer = new MockBuffer();
constexpr uint32_t mockToken = 1;
- const auto ret = [mockBuffer](const BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<BufferRole>& /*inputRoles*/,
- const hidl_vec<BufferRole>& /*outputRoles*/,
- V1_3::IDevice::allocate_cb cb) -> Return<void> {
+ const auto ret = [mockBuffer](
+ const V1_3::BufferDesc& /*desc*/,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ V1_3::IDevice::allocate_cb cb) -> hardware::Return<void> {
cb(V1_3::ErrorStatus::NONE, mockBuffer, mockToken);
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
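
The allocate mock above reports three things through its callback: a status, the buffer object, and a numeric token naming the buffer; the failure variant later in the file passes nullptr and token 0. A reduced sketch of that callback contract, with stand-in types:

#include <cstdint>
#include <functional>
#include <memory>

enum class ErrorStatus { NONE, GENERAL_FAILURE };  // stand-in
struct Buffer {};                                  // stand-in for V1_3::IBuffer

using AllocateCb = std::function<void(ErrorStatus, std::shared_ptr<Buffer>, uint32_t)>;

void fakeAllocate(bool succeed, const AllocateCb& cb) {
    if (succeed) {
        cb(ErrorStatus::NONE, std::make_shared<Buffer>(), /*token=*/1);
    } else {
        cb(ErrorStatus::GENERAL_FAILURE, nullptr, /*token=*/0);  // failure shape
    }
}
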
@@ -1292,7 +1341,7 @@ TEST_F(VersionedIDeviceV1_3Test, allocate) {
TEST_F(VersionedIDeviceMockTest, wait) {
// setup call
- const auto ret = []() -> Return<void> { return {}; };
+ const auto ret = []() -> hardware::Return<void> { return {}; };
EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(Invoke(ret));
// run test
@@ -1308,16 +1357,16 @@ TEST_F(VersionedIDeviceV1_0Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1325,16 +1374,16 @@ TEST_F(VersionedIDeviceV1_1Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1342,16 +1391,16 @@ TEST_F(VersionedIDeviceV1_2Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1359,16 +1408,16 @@ TEST_F(VersionedIDeviceV1_3Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_3::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1380,7 +1429,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelLaunchFailure) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1396,7 +1445,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelLaunchFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1412,7 +1461,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelLaunchFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1430,7 +1479,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelLaunchFailure) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1446,7 +1495,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelReturnFailure) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1462,7 +1511,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelReturnFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1478,7 +1527,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelReturnFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1496,7 +1545,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelReturnFailure) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1512,7 +1561,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelNullptrError) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1528,7 +1577,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelNullptrError) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1544,7 +1593,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelNullptrError) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1562,7 +1611,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelNullptrError) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1572,13 +1621,13 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelNullptrError) {
TEST_F(VersionedIDeviceV1_3Test, allocateFailure) {
// setup failure
- const auto ret = [](const BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<BufferRole>& /*inputRoles*/,
- const hidl_vec<BufferRole>& /*outputRoles*/,
- V1_3::IDevice::allocate_cb cb) -> Return<void> {
+ const auto ret = [](const V1_3::BufferDesc& /*desc*/,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ V1_3::IDevice::allocate_cb cb) -> hardware::Return<void> {
cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
@@ -1600,11 +1649,11 @@ TEST_F(VersionedIDeviceV1_0Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1615,11 +1664,11 @@ TEST_F(VersionedIDeviceV1_1Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1630,11 +1679,11 @@ TEST_F(VersionedIDeviceV1_2Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1645,11 +1694,11 @@ TEST_F(VersionedIDeviceV1_3Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1660,7 +1709,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1675,7 +1724,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1690,7 +1739,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1705,7 +1754,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1767,7 +1816,7 @@ TEST_F(VersionedIDeviceMockTest, DISABLED_prepareModelRecoverCrash) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1788,7 +1837,7 @@ TEST_F(VersionedIDeviceMockTest, prepareModelFullCrash) {
.WillOnce(testing::Return(nullptr));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1798,7 +1847,7 @@ TEST_F(VersionedIDeviceMockTest, prepareModelFullCrash) {
TEST_F(VersionedIDeviceMockTest, prepareModelAsyncCrash) {
// setup failure
- const auto ret = [this]() -> Return<V1_3::ErrorStatus> {
+ const auto ret = [this]() -> hardware::Return<V1_3::ErrorStatus> {
kMockDevice->simulateCrash();
return V1_3::ErrorStatus::NONE;
};
@@ -1807,7 +1856,7 @@ TEST_F(VersionedIDeviceMockTest, prepareModelAsyncCrash) {
.WillOnce(InvokeWithoutArgs(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1842,7 +1891,7 @@ TEST_F(VersionedIDeviceMockTest, waitRecoverCrash) {
.WillOnce(testing::Return(mockRecoveredDevice));
// setup recovered device calls
- const auto ret = []() -> Return<bool> { return true; };
+ const auto ret = []() -> hardware::Return<bool> { return true; };
EXPECT_CALL(*mockRecoveredDevice, linkToDeathRet()).Times(1).WillOnce(Invoke(ret));
// run test
@@ -1903,7 +1952,7 @@ std::shared_ptr<VersionedIPreparedModel> makeVersionedIPreparedModelSuccessfulIn
EXPECT_CALL(*mockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(testing::AnyNumber());
EXPECT_CALL(*mockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)).Times(testing::AnyNumber());
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = device.prepareModel(makeModel, {}, {}, {}, {}, {});
CHECK_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
@@ -1948,7 +1997,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathTransportFailure) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1968,7 +2017,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathDeadObject) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1980,7 +2029,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathReturnError) {
// setup failure
EXPECT_CALL(*kMockPreparedModel, linkToDeathRet())
.Times(1)
- .WillOnce(InvokeWithoutArgs([]() -> Return<bool> { return false; }));
+ .WillOnce(InvokeWithoutArgs([]() -> hardware::Return<bool> { return false; }));
const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE,
kMockPreparedModel);
EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _))
@@ -1988,7 +2037,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathReturnError) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -2030,8 +2079,8 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeAsync) {
TEST_F(VersionedIPreparedModelV1_2Test, executeAsync) {
// setup call
- const auto ret =
- makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {},
+ kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2046,8 +2095,8 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeAsync) {
TEST_F(VersionedIPreparedModelV1_3Test, executeAsync) {
// setup call
- const auto ret =
- makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {},
+ kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2092,7 +2141,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executePreferSync) {
TEST_F(VersionedIPreparedModelV1_2Test, executePreferSync) {
// setup call
- const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2107,7 +2156,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executePreferSync) {
TEST_F(VersionedIPreparedModelV1_3Test, executePreferSync) {
// setup call
- const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(ret));
@@ -2156,7 +2205,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFenced) {
TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) {
// setup call
- const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2173,8 +2222,8 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) {
TEST_F(VersionedIPreparedModelV1_3Test, executeFenced) {
// setup call
auto memory = allocateSharedMemory(4);
- hidl_handle fakeSyncFence(memory.handle());
- const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
+ hardware::hidl_handle fakeSyncFence(memory.handle());
+ const sp<V1_3::IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
const auto ret = makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, fakeSyncFence, callback);
EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
@@ -2276,7 +2325,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncLaunchFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchFailure) {
// setup failure
const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::GENERAL_FAILURE,
- V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2292,7 +2341,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncLaunchFailure) {
// setup failure
const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE,
- V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ V1_3::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2338,7 +2387,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncReturnFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncReturnFailure) {
// setup failure
const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE,
- V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2354,7 +2403,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncReturnFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncReturnFailure) {
// setup failure
const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE,
- V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2402,7 +2451,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executePreferSyncFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2418,7 +2467,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(ret));
@@ -2470,7 +2519,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFencedFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2487,8 +2536,8 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executeFencedFailure) {
// setup failure
auto memory = allocateSharedMemory(4);
- hidl_handle fakeSyncFence(memory.handle());
- const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
+ hardware::hidl_handle fakeSyncFence(memory.handle());
+ const sp<V1_3::IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
const auto ret =
makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, fakeSyncFence, callback);
EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _))
@@ -2894,7 +2943,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncCrash) {
TEST_F(VersionedIPreparedModelMockTest, executeAsyncReturnCrash) {
// setup failure
- const auto ret = [this]() -> Return<V1_3::ErrorStatus> {
+ const auto ret = [this]() -> hardware::Return<V1_3::ErrorStatus> {
kMockPreparedModel->simulateCrash();
return V1_3::ErrorStatus::NONE;
};
diff --git a/nn/runtime/test/android_fuzzing/Converter.cpp b/nn/runtime/test/android_fuzzing/Converter.cpp
index ca853aefc..c2fc354fa 100644
--- a/nn/runtime/test/android_fuzzing/Converter.cpp
+++ b/nn/runtime/test/android_fuzzing/Converter.cpp
@@ -29,39 +29,38 @@ namespace android::nn::fuzz {
namespace {
using namespace test_helper;
-using namespace android_nn_fuzz;
constexpr uint32_t kMaxSize = 65536;
-TestOperandType convert(OperandType type) {
+TestOperandType convert(android_nn_fuzz::OperandType type) {
return static_cast<TestOperandType>(type);
}
-TestOperationType convert(OperationType type) {
+TestOperationType convert(android_nn_fuzz::OperationType type) {
return static_cast<TestOperationType>(type);
}
-TestOperandLifeTime convert(OperandLifeTime lifetime) {
+TestOperandLifeTime convert(android_nn_fuzz::OperandLifeTime lifetime) {
return static_cast<TestOperandLifeTime>(lifetime);
}
-std::vector<float> convert(const Scales& scales) {
+std::vector<float> convert(const android_nn_fuzz::Scales& scales) {
const auto& repeatedScale = scales.scale();
return std::vector<float>(repeatedScale.begin(), repeatedScale.end());
}
-TestSymmPerChannelQuantParams convert(const SymmPerChannelQuantParams& params) {
+TestSymmPerChannelQuantParams convert(const android_nn_fuzz::SymmPerChannelQuantParams& params) {
std::vector<float> scales = convert(params.scales());
const uint32_t channelDim = params.channel_dim();
return {.scales = std::move(scales), .channelDim = channelDim};
}
-std::vector<uint32_t> convert(const Dimensions& dimensions) {
+std::vector<uint32_t> convert(const android_nn_fuzz::Dimensions& dimensions) {
const auto& repeatedDimension = dimensions.dimension();
return std::vector<uint32_t>(repeatedDimension.begin(), repeatedDimension.end());
}
-TestBuffer convert(size_t size, const Buffer& buffer) {
+TestBuffer convert(size_t size, const android_nn_fuzz::Buffer& buffer) {
if (size == 0) {
return TestBuffer();
}
@@ -70,7 +69,7 @@ TestBuffer convert(size_t size, const Buffer& buffer) {
return TestBuffer::createRandom(size % kMaxSize, &generator);
}
-TestOperand convert(const Operand& operand) {
+TestOperand convert(const android_nn_fuzz::Operand& operand) {
const TestOperandType type = convert(operand.type());
std::vector<uint32_t> dimensions = convert(operand.dimensions());
const float scale = operand.scale();
@@ -79,7 +78,7 @@ TestOperand convert(const Operand& operand) {
auto channelQuant = convert(operand.channel_quant());
const bool isIgnored = false;
- const auto halType = static_cast<hal::OperandType>(type);
+ const auto halType = static_cast<V1_3::OperandType>(type);
const bool willOverflow = nonExtensionOperandSizeOfDataOverflowsUInt32(halType, dimensions);
const bool makeEmpty = (lifetime == TestOperandLifeTime::NO_VALUE ||
lifetime == TestOperandLifeTime::TEMPORARY_VARIABLE || willOverflow);
@@ -97,7 +96,7 @@ TestOperand convert(const Operand& operand) {
.data = std::move(data)};
}
-std::vector<TestOperand> convert(const Operands& operands) {
+std::vector<TestOperand> convert(const android_nn_fuzz::Operands& operands) {
std::vector<TestOperand> testOperands;
testOperands.reserve(operands.operand_size());
const auto& repeatedOperand = operands.operand();
@@ -106,19 +105,19 @@ std::vector<TestOperand> convert(const Operands& operands) {
return testOperands;
}
-std::vector<uint32_t> convert(const Indexes& indexes) {
+std::vector<uint32_t> convert(const android_nn_fuzz::Indexes& indexes) {
const auto& repeatedIndex = indexes.index();
return std::vector<uint32_t>(repeatedIndex.begin(), repeatedIndex.end());
}
-TestOperation convert(const Operation& operation) {
+TestOperation convert(const android_nn_fuzz::Operation& operation) {
const TestOperationType type = convert(operation.type());
std::vector<uint32_t> inputs = convert(operation.inputs());
std::vector<uint32_t> outputs = convert(operation.outputs());
return {.type = type, .inputs = std::move(inputs), .outputs = std::move(outputs)};
}
-std::vector<TestOperation> convert(const Operations& operations) {
+std::vector<TestOperation> convert(const android_nn_fuzz::Operations& operations) {
std::vector<TestOperation> testOperations;
testOperations.reserve(operations.operation_size());
const auto& repeatedOperation = operations.operation();
@@ -142,7 +141,7 @@ void calculateNumberOfConsumers(const std::vector<TestOperation>& operations,
std::for_each(operations.begin(), operations.end(), addAllConsumers);
}
-TestModel convert(const Model& model) {
+TestModel convert(const android_nn_fuzz::Model& model) {
std::vector<TestOperand> operands = convert(model.operands());
std::vector<TestOperation> operations = convert(model.operations());
std::vector<uint32_t> inputIndexes = convert(model.input_indexes());
@@ -161,7 +160,7 @@ TestModel convert(const Model& model) {
} // anonymous namespace
-TestModel convertToTestModel(const Test& model) {
+TestModel convertToTestModel(const android_nn_fuzz::Test& model) {
return convert(model.model());
}
diff --git a/nn/runtime/test/android_fuzzing/FuzzHarness.cpp b/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
index 3d787d68f..76c34a75a 100644
--- a/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
+++ b/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
@@ -31,7 +31,7 @@ namespace {
using ::android::nn::nonExtensionOperandSizeOfDataOverflowsUInt32;
using ::android::nn::fuzz::convertToTestModel;
-using ::android::nn::hal::OperandType;
+using ::android::nn::V1_3::OperandType;
using ::test_helper::TestModel;
using ::test_helper::TestOperand;
diff --git a/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp b/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp
index 2f72b9da4..783b66092 100644
--- a/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp
+++ b/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp
@@ -41,8 +41,8 @@ OperationType convert(TestOperationType type) {
return static_cast<OperationType>(type);
}
-OperandLifeTime convert(TestOperandLifeTime lifetime) {
- return static_cast<OperandLifeTime>(lifetime);
+Operand::LifeTime convert(TestOperandLifeTime lifetime) {
+ return static_cast<Operand::LifeTime>(lifetime);
}
Scales convert(const std::vector<float>& scales) {
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
index c48829867..66023c1db 100644
--- a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -20,6 +20,7 @@
#include <vector>
+#include <nnapi/Types.h>
#include "FibonacciExtension.h"
#include "HalInterfaces.h"
#include "NeuralNetworksExtensions.h"
@@ -33,10 +34,7 @@ namespace nn {
namespace sample_driver {
namespace {
-using namespace hal;
-
-const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
-const uint32_t kTypeWithinExtensionMask = (1 << kLowBitsType) - 1;
+const uint32_t kTypeWithinExtensionMask = (1 << kExtensionTypeBits) - 1;
namespace fibonacci_op {
@@ -48,22 +46,22 @@ constexpr uint32_t kInputN = 0;
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-bool getFibonacciExtensionPrefix(const Model& model, uint16_t* prefix) {
+bool getFibonacciExtensionPrefix(const V1_3::Model& model, uint16_t* prefix) {
NN_RET_CHECK_EQ(model.extensionNameToPrefix.size(), 1u); // Assumes no other extensions in use.
NN_RET_CHECK_EQ(model.extensionNameToPrefix[0].name, EXAMPLE_FIBONACCI_EXTENSION_NAME);
*prefix = model.extensionNameToPrefix[0].prefix;
return true;
}
-bool isFibonacciOperation(const Operation& operation, const Model& model) {
+bool isFibonacciOperation(const V1_3::Operation& operation, const V1_3::Model& model) {
int32_t operationType = static_cast<int32_t>(operation.type);
uint16_t prefix;
NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
- NN_RET_CHECK_EQ(operationType, (prefix << kLowBitsType) | EXAMPLE_FIBONACCI);
+ NN_RET_CHECK_EQ(operationType, (prefix << kExtensionTypeBits) | EXAMPLE_FIBONACCI);
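+ // The extension operation type packs the model-assigned prefix into the high bits and the
+ // type within the extension into the low kExtensionTypeBits bits.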
return true;
}
-bool validate(const Operation& operation, const Model& model) {
+bool validate(const V1_3::Operation& operation, const V1_3::Model& model) {
NN_RET_CHECK(isFibonacciOperation(operation, model));
NN_RET_CHECK_EQ(operation.inputs.size(), kNumInputs);
NN_RET_CHECK_EQ(operation.outputs.size(), kNumOutputs);
@@ -71,9 +69,9 @@ bool validate(const Operation& operation, const Model& model) {
int32_t outputType = static_cast<int32_t>(model.main.operands[operation.outputs[0]].type);
uint16_t prefix;
NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
- NN_RET_CHECK(inputType == ((prefix << kLowBitsType) | EXAMPLE_INT64) ||
+ NN_RET_CHECK(inputType == ((prefix << kExtensionTypeBits) | EXAMPLE_INT64) ||
inputType == ANEURALNETWORKS_TENSOR_FLOAT32);
- NN_RET_CHECK(outputType == ((prefix << kLowBitsType) | EXAMPLE_TENSOR_QUANT64_ASYMM) ||
+ NN_RET_CHECK(outputType == ((prefix << kExtensionTypeBits) | EXAMPLE_TENSOR_QUANT64_ASYMM) ||
outputType == ANEURALNETWORKS_TENSOR_FLOAT32);
return true;
}
@@ -128,7 +126,7 @@ bool execute(IOperationExecutionContext* context) {
uint64_t* output = context->getOutputBuffer<uint64_t>(kOutputTensor);
Shape outputShape = context->getOutputShape(kOutputTensor);
auto outputQuant = reinterpret_cast<const ExampleQuant64AsymmParams*>(
- outputShape.extraParams.extension().data());
+ std::get<Operand::ExtensionParams>(outputShape.extraParams).data());
return compute(n, outputQuant->scale, outputQuant->zeroPoint, output);
}
}
@@ -142,14 +140,14 @@ const OperationRegistration* FibonacciOperationResolver::findOperation(
static OperationRegistration operationRegistration(operationType, fibonacci_op::kOperationName,
nullptr, fibonacci_op::prepare,
fibonacci_op::execute, {});
- uint16_t prefix = static_cast<int32_t>(operationType) >> kLowBitsType;
+ uint16_t prefix = static_cast<int32_t>(operationType) >> kExtensionTypeBits;
uint16_t typeWithinExtension = static_cast<int32_t>(operationType) & kTypeWithinExtensionMask;
// Assumes no other extensions in use.
return prefix != 0 && typeWithinExtension == EXAMPLE_FIBONACCI ? &operationRegistration
: nullptr;
}
-Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
+hardware::Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
cb(V1_0::ErrorStatus::NONE,
{
{
@@ -169,44 +167,44 @@ Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb c
},
},
});
- return Void();
+ return hardware::Void();
}
-Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- static const PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
- Capabilities capabilities = {
+ static const V1_0::PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
-Return<void> FibonacciDriver::getSupportedOperations_1_3(const V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) {
+hardware::Return<void> FibonacciDriver::getSupportedOperations_1_3(
+ const V1_3::Model& model, getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (!validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; ++i) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (fibonacci_op::isFibonacciOperation(operation, model)) {
if (!fibonacci_op::validate(operation, model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
supported[i] = true;
}
}
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
} // namespace sample_driver
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.h b/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
index 303edd809..7daf4d2de 100644
--- a/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
@@ -34,7 +34,7 @@ class FibonacciOperationResolver : public IOperationResolver {
return &instance;
}
- const OperationRegistration* findOperation(hal::OperationType operationType) const override;
+ const OperationRegistration* findOperation(OperationType operationType) const override;
private:
FibonacciOperationResolver() {}
@@ -45,10 +45,10 @@ class FibonacciOperationResolver : public IOperationResolver {
class FibonacciDriver : public SampleDriver {
public:
FibonacciDriver() : SampleDriver(kDriverName, FibonacciOperationResolver::get()) {}
- hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
- hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
- hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) override;
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
static constexpr char kDriverName[] = "sample-driver-fibonacci-extension";
};
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 9799ca00c..8ba763aad 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -44,7 +44,7 @@ using namespace test_helper;
RandomOperand::RandomOperand(const OperandSignature& operand, TestOperandType dataType,
uint32_t rank)
: type(operand.type), finalizer(operand.finalizer) {
- NN_FUZZER_LOG << "Operand: " << toString(type);
+ NN_FUZZER_LOG << "Operand: " << type;
if (operand.constructor) operand.constructor(dataType, rank, this);
}
@@ -81,7 +81,7 @@ size_t RandomOperand::getBufferSize() const {
// Construct a RandomOperation from OperationSignature.
RandomOperation::RandomOperation(const OperationSignature& operation)
: opType(operation.opType), finalizer(operation.finalizer) {
- NN_FUZZER_LOG << "Operation: " << toString(opType);
+ NN_FUZZER_LOG << "Operation: " << opType;
// Determine the data type and rank of the operation and invoke the constructor.
TestOperandType dataType = getRandomChoice(operation.supportedDataTypes);
@@ -294,14 +294,14 @@ TestModel RandomGraph::createTestModel() {
// Set model operations.
for (auto& operation : mOperations) {
- NN_FUZZER_LOG << "Operation: " << toString(operation.opType);
+ NN_FUZZER_LOG << "Operation: " << operation.opType;
TestOperation testOperation = {.type = static_cast<TestOperationType>(operation.opType)};
for (auto& op : operation.inputs) {
- NN_FUZZER_LOG << toString(*op);
+ NN_FUZZER_LOG << *op;
testOperation.inputs.push_back(op->opIndex);
}
for (auto& op : operation.outputs) {
- NN_FUZZER_LOG << toString(*op);
+ NN_FUZZER_LOG << *op;
testOperation.outputs.push_back(op->opIndex);
}
testModel.main.operations.push_back(std::move(testOperation));
diff --git a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index 1aa7fea41..8faae1271 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -119,18 +119,13 @@ class LoggerStream {
};
template <typename T>
-inline std::string toString(const T& obj) {
- return std::to_string(obj);
-}
-
-template <typename T>
inline std::string joinStr(const std::string& joint, const std::vector<T>& items) {
std::stringstream ss;
for (uint32_t i = 0; i < items.size(); i++) {
if (i == 0) {
- ss << toString(items[i]);
+ ss << items[i];
} else {
- ss << joint << toString(items[i]);
+ ss << joint << items[i];
}
}
return ss.str();
@@ -150,18 +145,15 @@ template <typename T>
inline std::string joinStr(const std::string& joint, int limit, const std::vector<T>& items) {
if (items.size() > static_cast<size_t>(limit)) {
std::vector<T> topMax(items.begin(), items.begin() + limit);
- return joinStr(joint, topMax) + ", (" + toString(items.size() - limit) + " ommited), " +
- toString(items.back());
+ std::stringstream ss;
+ ss << joinStr(joint, topMax) << ", (" << (items.size() - limit) << " omitted), "
+ << items.back();
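+ // E.g. with joint ", ", limit 3, and items {1, 2, 3, 4, 5, 6, 7, 8}, this yields
+ // "1, 2, 3, (5 omitted), 8".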
+ return ss.str();
} else {
return joinStr(joint, items);
}
}
-static const char* kLifeTimeNames[6] = {
- "TEMPORARY_VARIABLE", "SUBGRAPH_INPUT", "SUBGRAPH_OUTPUT",
- "CONSTANT_COPY", "CONSTANT_REFERENCE", "NO_VALUE",
-};
-
static const bool kScalarDataType[]{
true, // ANEURALNETWORKS_FLOAT32
true, // ANEURALNETWORKS_INT32
@@ -198,10 +190,9 @@ static const uint32_t kSizeOfDataType[]{
1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
-template <>
-inline std::string toString<RandomVariableType>(const RandomVariableType& type) {
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableType& type) {
static const std::string typeNames[] = {"FREE", "CONST", "OP"};
- return typeNames[static_cast<int>(type)];
+ return os << typeNames[static_cast<int>(type)];
}
inline std::string alignedString(std::string str, int width) {
@@ -210,51 +201,45 @@ inline std::string alignedString(std::string str, int width) {
return str;
}
-template <>
-inline std::string toString<RandomVariableRange>(const RandomVariableRange& range) {
- return "[" + joinStr(", ", 20, range.getChoices()) + "]";
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableRange& range) {
+ return os << "[" + joinStr(", ", 20, range.getChoices()) + "]";
}
-template <>
-inline std::string toString<RandomOperandType>(const RandomOperandType& type) {
+inline std::ostream& operator<<(std::ostream& os, const RandomOperandType& type) {
static const std::string typeNames[] = {"Input", "Output", "Internal", "Parameter", "No Value"};
- return typeNames[static_cast<int>(type)];
+ return os << typeNames[static_cast<int>(type)];
}
-template <>
-inline std::string toString<RandomVariableNode>(const RandomVariableNode& var) {
- std::stringstream ss;
- ss << "var" << var->index << " = ";
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableNode& var) {
+ os << "var" << var->index << " = ";
switch (var->type) {
case RandomVariableType::FREE:
- ss << "FREE " << toString(var->range);
+ os << "FREE " << var->range;
break;
case RandomVariableType::CONST:
- ss << "CONST " << toString(var->value);
+ os << "CONST " << var->value;
break;
case RandomVariableType::OP:
- ss << "var" << var->parent1->index << " " << var->op->getName();
- if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
- ss << ", " << toString(var->range);
+ os << "var" << var->parent1->index << " " << var->op->getName();
+ if (var->parent2 != nullptr) os << " var" << var->parent2->index;
+ os << ", " << var->range;
break;
default:
NN_FUZZER_CHECK(false);
}
- ss << ", timestamp = " << var->timestamp;
- return ss.str();
+ os << ", timestamp = " << var->timestamp;
+ return os;
}
-template <>
-inline std::string toString<RandomVariable>(const RandomVariable& var) {
- return "var" + std::to_string(var.get()->index);
+inline std::ostream& operator<<(std::ostream& os, const RandomVariable& var) {
+ return os << "var" + std::to_string(var.get()->index);
}
-template <>
-inline std::string toString<RandomOperand>(const RandomOperand& op) {
- return toString(op.type) + ", dimension = [" +
- joinStr(", ", op.dimensions,
- [](const RandomVariable& var) { return std::to_string(var.getValue()); }) +
- "], scale = " + toString(op.scale) + " , zero_point = " + toString(op.zeroPoint);
+inline std::ostream& operator<<(std::ostream& os, const RandomOperand& op) {
+ return os << op.type << ", dimension = ["
+ << joinStr(", ", op.dimensions,
+ [](const RandomVariable& var) { return std::to_string(var.getValue()); })
+ << "], scale = " << op.scale << " , zero_point = " << op.zeroPoint;
}
// This class is a workaround for two issues our code relies on:
diff --git a/nn/runtime/test/fuzzing/RandomVariable.cpp b/nn/runtime/test/fuzzing/RandomVariable.cpp
index d3f6ef7e2..f1067e184 100644
--- a/nn/runtime/test/fuzzing/RandomVariable.cpp
+++ b/nn/runtime/test/fuzzing/RandomVariable.cpp
@@ -1,1225 +1,1225 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "RandomVariable.h"
-
-#include <algorithm>
-#include <memory>
-#include <set>
-#include <string>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "RandomGraphGeneratorUtils.h"
-
-namespace android {
-namespace nn {
-namespace fuzzing_test {
-
-unsigned int RandomVariableBase::globalIndex = 0;
-int RandomVariable::defaultValue = 10;
-
-RandomVariableBase::RandomVariableBase(int value)
- : index(globalIndex++),
- type(RandomVariableType::CONST),
- range(value),
- value(value),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(int lower, int upper)
- : index(globalIndex++),
- type(RandomVariableType::FREE),
- range(lower, upper),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(const std::vector<int>& choices)
- : index(globalIndex++),
- type(RandomVariableType::FREE),
- range(choices),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs,
- const std::shared_ptr<const IRandomVariableOp>& op)
- : index(globalIndex++),
- type(RandomVariableType::OP),
- range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)),
- op(op),
- parent1(lhs),
- parent2(rhs),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-void RandomVariableRange::setRange(int lower, int upper) {
- // kInvalidValue indicates unlimited bound.
- auto head = lower == kInvalidValue ? mChoices.begin()
- : std::lower_bound(mChoices.begin(), mChoices.end(), lower);
- auto tail = upper == kInvalidValue ? mChoices.end()
- : std::upper_bound(mChoices.begin(), mChoices.end(), upper);
- NN_FUZZER_CHECK(head <= tail) << "Invalid range!";
- if (head != mChoices.begin() || tail != mChoices.end()) {
- mChoices = std::vector<int>(head, tail);
- }
-}
-
-int RandomVariableRange::toConst() {
- if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)};
- return mChoices[0];
-}
-
-RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) {
- std::vector<int> result(lhs.size() + rhs.size());
- auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(),
- rhs.mChoices.end(), result.begin());
- result.resize(it - result.begin());
- return RandomVariableRange(std::move(result));
-}
-
-void RandomVariableBase::freeze() {
- if (type == RandomVariableType::CONST) return;
- value = range.toConst();
- type = RandomVariableType::CONST;
-}
-
-int RandomVariableBase::getValue() const {
- switch (type) {
- case RandomVariableType::CONST:
- return value;
- case RandomVariableType::OP:
- return op->eval(parent1->getValue(), parent2 == nullptr ? 0 : parent2->getValue());
- default:
- NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index;
- return 0;
- }
-}
-
-void RandomVariableBase::updateTimestamp() {
- timestamp = RandomVariableNetwork::get()->getGlobalTime();
- NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp;
-}
-
-RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(const std::vector<int>& choices)
- : mVar(new RandomVariableBase(choices)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(RandomVariableType type)
- : mVar(new RandomVariableBase(1, defaultValue)) {
- NN_FUZZER_CHECK(type == RandomVariableType::FREE);
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs,
- const std::shared_ptr<const IRandomVariableOp>& op)
- : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) {
- // Make a copy if the parent is CONST. This will resolve the fake dependency problem.
- if (mVar->parent1->type == RandomVariableType::CONST) {
- mVar->parent1 = RandomVariable(mVar->parent1->value).get();
- }
- if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) {
- mVar->parent2 = RandomVariable(mVar->parent2->value).get();
- }
- mVar->parent1->children.push_back(mVar);
- if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar);
- RandomVariableNetwork::get()->add(mVar);
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
-}
-
-void RandomVariable::setRange(int lower, int upper) {
- NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr";
- NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index;
- size_t oldSize = mVar->range.size();
- mVar->range.setRange(lower, upper);
- // Only update the timestamp if the range is *indeed* narrowed down.
- if (mVar->range.size() != oldSize) mVar->updateTimestamp();
-}
-
-RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const {
- std::set<int> st;
- for (auto i : lhs.getChoices()) {
- for (auto j : rhs.getChoices()) {
- int res = this->eval(i, j);
- if (res > kMaxValue || res < -kMaxValue) continue;
- st.insert(res);
- }
- }
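- // E.g. (hypothetical values): for MAX with lhs choices {1, 5} and rhs choices {3, 4},
- // st becomes {3, 4, 5}.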
- return RandomVariableRange(st);
-}
-
-// Check if the range contains exactly all values in [min, max].
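-// E.g. {2, 3, 4} is continuous, while {2, 4} is not.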
-static inline bool isContinuous(const std::set<int>* range) {
- return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size());
-}
-
-// Fill the set with a range of values specified by [lower, upper].
-static inline void fillRange(std::set<int>* range, int lower, int upper) {
- for (int i = lower; i <= upper; i++) range->insert(i);
-}
-
-// The slowest algorithm: iterate through every combination of parents and save the valid pairs.
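-// This performs one eval() per (parent1, parent2) pair, i.e. O(|parent1In| * |parent2In|) calls.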
-void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const {
- // Avoid the binary search if the child is a closed range.
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- int res = this->eval(i, j);
- // Avoid the binary search if obviously out of range.
- if (res > child.second || res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- parent2Out->insert(j);
- childOut->insert(res);
- valid = true;
- }
- }
- if (valid) parent1Out->insert(i);
- }
-}
-
-// A helper template to make a class into a Singleton.
-template <class T>
-class Singleton : public T {
- public:
- static const std::shared_ptr<const T>& get() {
- static std::shared_ptr<const T> instance(new T);
- return instance;
- }
-};
-
-// A set of operations that only compute on a single input value.
-class IUnaryOp : public IRandomVariableOp {
- public:
- using IRandomVariableOp::eval;
- virtual int eval(int val) const = 0;
- virtual int eval(int lhs, int) const override { return eval(lhs); }
- // The slowest algorithm: iterate through every value of the parent and save the valid ones.
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- NN_FUZZER_CHECK(parent2In == nullptr);
- NN_FUZZER_CHECK(parent2Out == nullptr);
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- int res = this->eval(i);
- if (res > child.second || res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- parent1Out->insert(i);
- childOut->insert(res);
- }
- }
- }
-};
-
-// A set of operations that only check conditional constraints.
-class IConstraintOp : public IRandomVariableOp {
- public:
- using IRandomVariableOp::eval;
- virtual bool check(int lhs, int rhs) const = 0;
- virtual int eval(int lhs, int rhs) const override {
- return check(lhs, rhs) ? 0 : kInvalidValue;
- }
- // The range for a constraint op is always {0}.
- virtual RandomVariableRange getInitRange(const RandomVariableRange&,
- const RandomVariableRange&) const override {
- return RandomVariableRange(0);
- }
- // The slowest algorithm:
- // iterate through every combination of parents and save the valid pairs.
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out,
- std::set<int>* childOut) const override {
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- if (this->check(i, j)) {
- parent2Out->insert(j);
- valid = true;
- }
- }
- if (valid) parent1Out->insert(i);
- }
- if (!parent1Out->empty()) childOut->insert(0);
- }
-};
-
-class Addition : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs + rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // For parents and child with closed (continuous) ranges, the output ranges can be
- // computed directly without iteration.
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
- std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
-
- // From ranges for the parents, evaluate the range for the child.
- // [a, b] + [c, d] -> [a + c, b + d]
- fillRange(childOut, std::max(child.first, parent1.first + parent2.first),
- std::min(child.second, parent1.second + parent2.second));
-
- // From ranges for the child and one parent, evaluate the range for the other parent.
- // [a, b] - [c, d] -> [a - d, b - c]
- fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second),
- std::min(parent1.second, child.second - parent2.first));
- fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second),
- std::min(parent2.second, child.second - parent1.first));
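- // Worked example (hypothetical values): parent1 = [1, 3], parent2 = [2, 5], child = [4, 6]:
- // childOut = [max(4, 1 + 2), min(6, 3 + 5)] = [4, 6], and
- // parent1Out = [max(1, 4 - 5), min(3, 6 - 2)] = [1, 3].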
- }
- }
- virtual const char* getName() const override { return "ADD"; }
-};
-
-class Subtraction : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs - rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // Similar algorithm as Addition.
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
- std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- fillRange(childOut, std::max(child.first, parent1.first - parent2.second),
- std::min(child.second, parent1.second - parent2.first));
- fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first),
- std::min(parent1.second, child.second + parent2.second));
- fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second),
- std::min(parent2.second, parent1.second - child.first));
- }
- }
- virtual const char* getName() const override { return "SUB"; }
-};
-
-class Multiplication : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs * rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- if (lhs.min() < 0 || rhs.min() < 0) {
- return IRandomVariableOp::getInitRange(lhs, rhs);
- } else {
- int lower = std::min(lhs.min() * rhs.min(), kMaxValue);
- int upper = std::min(lhs.max() * rhs.max(), kMaxValue);
- return RandomVariableRange(lower, upper);
- }
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- int res = this->eval(i, j);
- // Since MUL increases monotonically in the second operand, break out of
- // the loop once the result exceeds the upper limit.
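- // E.g. (hypothetical): with a child upper bound of 10 and i = 4, the loop stops at
- // j = 3 because 4 * 3 = 12 > 10.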
- if (res > child.second) break;
- if (res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- valid = true;
- parent2Out->insert(j);
- childOut->insert(res);
- }
- }
- if (valid) parent1Out->insert(i);
- }
- }
- }
- virtual const char* getName() const override { return "MUL"; }
-};
-
-class Division : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return rhs == 0 ? kInvalidValue : lhs / rhs;
- }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- if (lhs.min() < 0 || rhs.min() <= 0) {
- return IRandomVariableOp::getInitRange(lhs, rhs);
- } else {
- return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min());
- }
- }
- virtual const char* getName() const override { return "DIV"; }
-};
-
-class ExactDivision : public Division {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs;
- }
- virtual const char* getName() const override { return "EXACT_DIV"; }
-};
-
-class Modulo : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return rhs == 0 ? kInvalidValue : lhs % rhs;
- }
- virtual RandomVariableRange getInitRange(const RandomVariableRange&,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(0, rhs.max());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (*childIn->begin() != 0 || childIn->size() != 1u) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // For the special case where the child is a constant 0, it is faster to evaluate
- // the ranges for the parents separately.
-
- // Evaluate parent1 directly.
- for (auto i : *parent1In) {
- for (auto j : *parent2In) {
- if (i % j == 0) {
- parent1Out->insert(i);
- break;
- }
- }
- }
- // Evaluate parent2: check whether a multiple of each parent2 value can be found in parent1.
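- // E.g. (hypothetical): with parent1In = {4, 6, 9} and i = 3, j = 2 gives 3 * 2 = 6,
- // which is in parent1In, so 3 remains a valid parent2 value.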
- int parent1Max = *parent1In->rbegin();
- for (auto i : *parent2In) {
- int jMax = parent1Max / i;
- for (int j = 1; j <= jMax; j++) {
- if (parent1In->find(i * j) != parent1In->end()) {
- parent2Out->insert(i);
- break;
- }
- }
- }
- if (!parent1Out->empty()) childOut->insert(0);
- }
- }
- virtual const char* getName() const override { return "MOD"; }
-};
-
-class Maximum : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); }
- virtual const char* getName() const override { return "MAX"; }
-};
-
-class Minimum : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); }
- virtual const char* getName() const override { return "MIN"; }
-};
-
-class Square : public IUnaryOp {
- public:
- virtual int eval(int val) const override { return val * val; }
- virtual const char* getName() const override { return "SQUARE"; }
-};
-
-class UnaryEqual : public IUnaryOp {
- public:
- virtual int eval(int val) const override { return val; }
- virtual const char* getName() const override { return "UNARY_EQUAL"; }
-};
-
-class Equal : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs == rhs; }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0);
- // The intersection of two sets can be found in O(n).
- std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(),
- parent2In->end(), std::inserter(*parent1Out, parent1Out->begin()));
- *parent2Out = *parent1Out;
- childOut->insert(0);
- }
- virtual const char* getName() const override { return "EQUAL"; }
-};
-
-class GreaterThan : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs > rhs; }
- virtual const char* getName() const override { return "GREATER_THAN"; }
-};
-
-class GreaterEqual : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; }
- virtual const char* getName() const override { return "GREATER_EQUAL"; }
-};
-
-class FloatMultiplication : public IUnaryOp {
- public:
- FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {}
- virtual int eval(int val) const override {
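- // Floors the scaled value; e.g. (hypothetical) eval(5) with multiplicand 0.5f returns 2.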
- return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand));
- }
- virtual const char* getName() const override { return "MUL_FLOAT"; }
-
- private:
- float mMultiplicand;
-};
-
-// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes.
-// Since there must be at most one edge between two RandomVariableNodes, we have to do something
-// special when both sides refer to the same node.
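-// For example, v + v is rewritten below as v * 2, and v - v folds directly to the constant 0.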
-
-RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get())
- : RandomVariable(lhs, rhs, Singleton<Addition>::get());
-}
-RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(0)
- : RandomVariable(lhs, rhs, Singleton<Subtraction>::get());
-}
-RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(lhs, RandomVariable(), Singleton<Square>::get())
- : RandomVariable(lhs, rhs, Singleton<Multiplication>::get());
-}
-RandomVariable operator*(const RandomVariable& lhs, const float& rhs) {
- return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs));
-}
-RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(1)
- : RandomVariable(lhs, rhs, Singleton<Division>::get());
-}
-RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(0)
- : RandomVariable(lhs, rhs, Singleton<Modulo>::get());
-}
-RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get());
-}
-RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get());
-}
-
-RandomVariable RandomVariable::exactDiv(const RandomVariable& other) {
- return mVar == other.get() ? RandomVariable(1)
- : RandomVariable(*this, other, Singleton<ExactDivision>::get());
-}
-
-RandomVariable RandomVariable::setEqual(const RandomVariable& other) const {
- RandomVariableNode node1 = mVar, node2 = other.get();
- NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index;
-
- // Do not setEqual on the same pair twice.
- if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) ||
- (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) {
- NN_FUZZER_LOG << "Already equal. Return.";
- return RandomVariable();
- }
-
- // If possible, always try UnaryEqual first to reduce the search space.
- // UnaryEqual can be used if node B is FREE and is evaluated later than node A.
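- // In that case node B simply mirrors node A's value instead of adding a binary Equal
- // constraint node.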
- // TODO: Reduce code duplication.
- if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) {
- NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index;
- node2->type = RandomVariableType::OP;
- node2->parent1 = node1;
- node2->op = Singleton<UnaryEqual>::get();
- node1->children.push_back(node2);
- RandomVariableNetwork::get()->join(node1, node2);
- node1->updateTimestamp();
- return other;
- }
- if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) {
- NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index;
- node1->type = RandomVariableType::OP;
- node1->parent1 = node2;
- node1->op = Singleton<UnaryEqual>::get();
- node2->children.push_back(node1);
- RandomVariableNetwork::get()->join(node2, node1);
- node1->updateTimestamp();
- return *this;
- }
- return RandomVariable(*this, other, Singleton<Equal>::get());
-}
-
-RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const {
- NN_FUZZER_CHECK(mVar != other.get());
- return RandomVariable(*this, other, Singleton<GreaterThan>::get());
-}
-RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const {
- return mVar == other.get() ? *this
- : RandomVariable(*this, other, Singleton<GreaterEqual>::get());
-}
-
-void DisjointNetwork::add(const RandomVariableNode& var) {
- // Find the subnet index of the parents and decide the index for var.
- int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1];
- int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2];
- int ind = join(ind1, ind2);
- // If no parent, put it into a new subnet component.
- if (ind == -1) ind = mNextIndex++;
- NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind;
- mIndexMap[var] = ind;
- mEvalOrderMap[ind].push_back(var);
-}
-
-int DisjointNetwork::join(int ind1, int ind2) {
- if (ind1 == -1) return ind2;
- if (ind2 == -1) return ind1;
- if (ind1 == ind2) return ind1;
- NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2;
- auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2];
- // Append every node in ind2 to the end of ind1.
- for (const auto& var : order2) {
- order1.push_back(var);
- mIndexMap[var] = ind1;
- }
- // Remove ind2 from mEvalOrderMap.
- mEvalOrderMap.erase(mEvalOrderMap.find(ind2));
- return ind1;
-}
-
-RandomVariableNetwork* RandomVariableNetwork::get() {
- static RandomVariableNetwork instance;
- return &instance;
-}
-
-void RandomVariableNetwork::initialize(int defaultValue) {
- RandomVariableBase::globalIndex = 0;
- RandomVariable::defaultValue = defaultValue;
- mIndexMap.clear();
- mEvalOrderMap.clear();
- mDimProd.clear();
- mNextIndex = 0;
- mGlobalTime = 0;
- mTimestamp = -1;
-}
-
-bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1,
- const RandomVariableNode& node2) {
- if (node2->type != RandomVariableType::FREE) return false;
- int ind1 = mIndexMap[node1];
- // node2 is in a different subnet.
- if (ind1 != mIndexMap[node2]) return true;
- for (const auto& node : mEvalOrderMap[ind1]) {
- if (node == node2) return false;
- // node2 is in the same subnet but evaluated later than node1.
- if (node == node1) return true;
- }
- NN_FUZZER_CHECK(false) << "Code executed in non-reachable region.";
- return false;
-}
-
-struct EvalInfo {
- // The RandomVariableNode that this EvalInfo is associated with.
- // var->value is the current value during evaluation.
- RandomVariableNode var;
-
- // The RandomVariable value is staged when a valid combination is found.
- std::set<int> staging;
-
- // The staging values are committed after a subnet evaluation.
- std::set<int> committed;
-
- // Keeps track of the latest timestamp at which committed was updated.
- int timestamp;
-
- // For evalSubnetWithLocalNetwork.
- RandomVariableType originalType;
-
- // eval() should only be invoked on an OP RandomVariable.
- bool eval() {
- NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
- var->value = var->op->eval(var->parent1->value,
- var->parent2 == nullptr ? 0 : var->parent2->value);
- if (var->value == kInvalidValue) return false;
- return committed.find(var->value) != committed.end();
- }
- void stage() { staging.insert(var->value); }
- void commit() {
- // Only update committed and timestamp if the range is *indeed* changed.
- if (staging.size() != committed.size()) {
- committed = std::move(staging);
- timestamp = RandomVariableNetwork::get()->getGlobalTime();
- }
- staging.clear();
- }
- void updateRange() {
- // Only update range and timestamp if the range is *indeed* changed.
- if (committed.size() != var->range.size()) {
- var->range = RandomVariableRange(committed);
- var->timestamp = timestamp;
- }
- committed.clear();
- }
-
- EvalInfo(const RandomVariableNode& var)
- : var(var),
- committed(var->range.getChoices().begin(), var->range.getChoices().end()),
- timestamp(var->timestamp) {}
-};
-using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>;
-
-// For logging only.
-inline std::string toString(const RandomVariableNode& var, EvalContext* context) {
- std::stringstream ss;
- ss << "var" << var->index << " = ";
- const auto& committed = context->at(var).committed;
- switch (var->type) {
- case RandomVariableType::FREE:
- ss << "FREE ["
- << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]";
- break;
- case RandomVariableType::CONST:
- ss << "CONST " << toString(var->value);
- break;
- case RandomVariableType::OP:
- ss << "var" << var->parent1->index << " " << var->op->getName();
- if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
- ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end()))
- << "]";
- break;
- default:
- NN_FUZZER_CHECK(false);
- }
- ss << ", timestamp = " << context->at(var).timestamp;
- return ss.str();
-}
-
-// Check if the subnet needs to be re-evaluated by comparing the timestamps.
-static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime,
- EvalContext* context = nullptr) {
- for (const auto& var : evalOrder) {
- int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp;
- // If we find a node that has been modified since the last evaluation, the subnet needs
- // to be re-evaluated.
- if (timestamp > subnetTime) return true;
- }
- return false;
-}
-
-// Helper function to evaluate the subnet recursively.
-// Iterate through all combinations of the FREE RandomVariables' choices.
-static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) {
- if (i == evalOrder.size()) {
- // Reached the end of the evaluation; found a valid combination.
- for (auto& var : evalOrder) context->at(var).stage();
- return;
- }
- const auto& var = evalOrder[i];
- if (var->type == RandomVariableType::FREE) {
- // For FREE RandomVariable, iterate through all valid choices.
- for (int val : context->at(var).committed) {
- var->value = val;
- evalSubnetHelper(evalOrder, context, i + 1);
- }
- return;
- } else if (var->type == RandomVariableType::OP) {
- // For OP RandomVariable, evaluate from parents and terminate if the result is invalid.
- if (!context->at(var).eval()) return;
- }
- evalSubnetHelper(evalOrder, context, i + 1);
-}
-
-// Check if the subnet has exactly one OP RandomVariable.
-static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) {
- int numOp = 0;
- for (const auto& var : evalOrder) {
- if (var->type == RandomVariableType::OP) numOp++;
- if (numOp > 1) return false;
- }
- return numOp != 0;
-}
-
-// Evaluate with a potentially faster approach provided by IRandomVariableOp.
-static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder,
- EvalContext* context) {
- NN_FUZZER_LOG << "Identified as single op subnet";
- const auto& var = evalOrder.back();
- NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
- var->op->eval(&context->at(var->parent1).committed,
- var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed,
- &context->at(var).committed, &context->at(var->parent1).staging,
- var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging,
- &context->at(var).staging);
-}
-
-// Compute the number of combinations of FREE RandomVariable choices, capped at kLimit.
-static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder,
- EvalContext* context = nullptr) {
- constexpr uint64_t kLimit = 1e8;
- uint64_t numCombinations = 1;
- for (const auto& var : evalOrder) {
- if (var->type == RandomVariableType::FREE) {
- size_t size =
- context == nullptr ? var->range.size() : context->at(var).committed.size();
- numCombinations *= size;
- // To prevent overflow.
- if (numCombinations > kLimit) return kLimit;
- }
- }
- return numCombinations;
-}
-
-// Evaluate the subnet recursively. Returns false if the number of combinations of FREE
-// RandomVariables exceeds the threshold kMaxNumCombinations.
-static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) {
- constexpr uint64_t kMaxNumCombinations = 1e7;
- NN_FUZZER_LOG << "Evaluate with brute force";
- if (isSingleOpSubnet(evalOrder)) {
- // If the subnet has only a single OP, dispatch to a faster evaluation.
- evalSubnetSingleOpHelper(evalOrder, context);
- } else {
- if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) {
- NN_FUZZER_LOG << "Terminate the evaluation because of large search range";
- std::cout << "[ ] Terminate the evaluation because of large search range"
- << std::endl;
- return false;
- }
- evalSubnetHelper(evalOrder, context);
- }
- for (auto& var : evalOrder) {
- if (context->at(var).staging.empty()) {
- NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context);
- return false;
- }
- context->at(var).commit();
- }
- return true;
-}
-
-struct LocalNetwork {
- EvaluationOrder evalOrder;
- std::vector<RandomVariableNode> bridgeNodes;
- int timestamp = 0;
-
- bool eval(EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp;
- // Temporarily treat bridge nodes as FREE RandomVariables.
- for (const auto& var : bridgeNodes) {
- context->at(var).originalType = var->type;
- var->type = RandomVariableType::FREE;
- }
- for (const auto& var : evalOrder) {
- context->at(var).staging.clear();
- NN_FUZZER_LOG << " - " << toString(var, context);
- }
- bool success = evalSubnetWithBruteForce(evalOrder, context);
- // Reset the RandomVariable types for bridge nodes.
- for (const auto& var : bridgeNodes) var->type = context->at(var).originalType;
- return success;
- }
-};
-
-// Partition the network further into LocalNetworks based on the results of the bridge
-// annotation algorithm.
-class GraphPartitioner : public DisjointNetwork {
- public:
- GraphPartitioner() = default;
-
- std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) {
- annotateBridge(evalOrder);
- for (const auto& var : evalOrder) add(var);
- return get(timestamp);
- }
-
- private:
- GraphPartitioner(const GraphPartitioner&) = delete;
- GraphPartitioner& operator=(const GraphPartitioner&) = delete;
-
- // Find the parent-child relationship between var1 and var2, and reset the bridge.
- void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) {
- if (var1->parent1 == var2) {
- mBridgeInfo[var1].isParent1Bridge = true;
- } else if (var1->parent2 == var2) {
- mBridgeInfo[var1].isParent2Bridge = true;
- } else {
- setBridgeFlag(var2, var1);
- }
- }
-
- // Annotate the bridges with DFS -- an edge [u, v] is a bridge if none of u's ancestors is
- // reachable from any node in the subtree of v. The complexity is O(V + E).
- // discoveryTime: The timestamp at which a node is first visited.
- // lowTime: The minimum discovery time over all nodes reachable from the subtree of the node.
- void annotateBridgeHelper(const RandomVariableNode& var, int* time) {
- mBridgeInfo[var].visited = true;
- mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++;
-
- // The algorithm operates on an undirected graph. First find all adjacent nodes.
- auto adj = var->children;
- if (var->parent1 != nullptr) adj.push_back(var->parent1);
- if (var->parent2 != nullptr) adj.push_back(var->parent2);
-
- for (const auto& weakChild : adj) {
- auto child = weakChild.lock();
- NN_FUZZER_CHECK(child != nullptr);
- if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue;
- if (!mBridgeInfo[child].visited) {
- mBridgeInfo[child].parent = var;
- annotateBridgeHelper(child, time);
-
- // If no node in the subtree of child is connected to any ancestor of var,
- // then the edge [var, child] is a bridge.
- mBridgeInfo[var].lowTime =
- std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime);
- if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime)
- setBridgeFlag(var, child);
- } else if (mBridgeInfo[var].parent != child) {
- mBridgeInfo[var].lowTime =
- std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime);
- }
- }
- }
-
- // Find all bridges in the subnet with DFS.
- void annotateBridge(const EvaluationOrder& evalOrder) {
- for (const auto& var : evalOrder) mBridgeInfo[var];
- int time = 0;
- for (const auto& var : evalOrder) {
- if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time);
- }
- }
-
- // Re-partition the network by treating bridges as if they were not edges.
- void add(const RandomVariableNode& var) {
- auto parent1 = var->parent1;
- auto parent2 = var->parent2;
- if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr;
- if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr;
- DisjointNetwork::add(var);
- var->parent1 = parent1;
- var->parent2 = parent2;
- }
-
- // Add bridge nodes to the local network and remove single-node subnets.
- std::vector<LocalNetwork> get(int timestamp) {
- std::vector<LocalNetwork> res;
- for (auto& pair : mEvalOrderMap) {
- // We do not need to evaluate a subnet with only a single node.
- if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue;
- res.emplace_back();
- for (const auto& var : pair.second) {
- if (mBridgeInfo[var].isParent1Bridge) {
- res.back().evalOrder.push_back(var->parent1);
- res.back().bridgeNodes.push_back(var->parent1);
- }
- if (mBridgeInfo[var].isParent2Bridge) {
- res.back().evalOrder.push_back(var->parent2);
- res.back().bridgeNodes.push_back(var->parent2);
- }
- res.back().evalOrder.push_back(var);
- }
- res.back().timestamp = timestamp;
- }
- return res;
- }
-
- // For bridge discovery algorithm.
- struct BridgeInfo {
- bool isParent1Bridge = false;
- bool isParent2Bridge = false;
- int discoveryTime = 0;
- int lowTime = 0;
- bool visited = false;
- std::shared_ptr<RandomVariableBase> parent = nullptr;
- };
- std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo;
-};
-
-// Evaluate subnets repeatedly until they converge.
-// Class T_Subnet must have members evalOrder and timestamp, and a member function eval.
-template <class T_Subnet>
-inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) {
- bool terminate = false;
- while (!terminate) {
- terminate = true;
- for (auto& subnet : *subnets) {
- if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) {
- if (!subnet.eval(context)) return false;
- subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime();
- terminate = false;
- }
- }
- }
- return true;
-}
-
-// Evaluate the subnet by first partitioning it further into LocalNetworks.
-static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp,
- EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate with local network";
- auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp);
- return evalSubnetsRepeatedly(&localNetworks, context);
-}
-
-struct LeafNetwork {
- EvaluationOrder evalOrder;
- int timestamp = 0;
- LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) {
- std::set<RandomVariableNode> visited;
- constructorHelper(var, &visited);
- }
- // Construct the leaf network by recursively including parent nodes.
- void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) {
- if (var == nullptr || visited->find(var) != visited->end()) return;
- constructorHelper(var->parent1, visited);
- constructorHelper(var->parent2, visited);
- visited->insert(var);
- evalOrder.push_back(var);
- }
- bool eval(EvalContext* context) {
- return evalSubnetWithLocalNetwork(evalOrder, timestamp, context);
- }
-};
-
-// Evaluate the subnet using leaf networks.
-// NOTE: This algorithm only produces a correct result *most* of the time (> 99%).
-// The random graph generator is expected to retry if it fails.
-static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp,
- EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate with leaf network";
- // Construct leaf networks.
- std::vector<LeafNetwork> leafNetworks;
- for (const auto& var : evalOrder) {
- if (var->children.empty()) {
- NN_FUZZER_LOG << "Found leaf " << toString(var, context);
- leafNetworks.emplace_back(var, timestamp);
- }
- }
- return evalSubnetsRepeatedly(&leafNetworks, context);
-}
-
-void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) {
- if (dims.size() <= 1) return;
- EvaluationOrder order;
- for (const auto& dim : dims) order.push_back(dim.get());
- mDimProd.push_back(order);
-}
-
-bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd,
- const std::unordered_map<RandomVariableNode, int>& indexMap,
- EvalContext* context, std::set<int>* dirtySubnets) {
- for (auto& evalOrder : mDimProd) {
- NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size();
- // Initialize EvalInfo of each RandomVariable.
- for (auto& var : evalOrder) {
- if (context->find(var) == context->end()) context->emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var, context);
- }
-
- // Enforce that the product of the dimension values stays below kMaxValue:
- // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...)
- int prod = 1;
- for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin());
- for (auto& var : evalOrder) {
- auto& committed = context->at(var).committed;
- int maxValue = kMaxValue / (prod / *committed.begin());
- auto it = committed.upper_bound(maxValue);
- // var has empty range -> no solution.
- if (it == committed.begin()) return false;
- // The range is not modified -> continue.
- if (it == committed.end()) continue;
- // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation.
- committed.erase(it, committed.end());
- context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime();
- dirtySubnets->insert(indexMap.at(var));
- }
- }
- return true;
-}
-
-bool RandomVariableNetwork::evalRange() {
- constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500;
- constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5;
- NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks";
- EvalContext context;
- std::set<int> dirtySubnets; // Which subnets need evaluation.
- for (auto& pair : mEvalOrderMap) {
- const auto& evalOrder = pair.second;
- // Decide by timestamp whether the subnet needs evaluation -- if no range has changed
- // since the last evaluation, the subnet does not need re-evaluation.
- if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue;
- dirtySubnets.insert(pair.first);
- }
- if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
-
- // Repeat until the ranges converge.
- while (!dirtySubnets.empty()) {
- for (int ind : dirtySubnets) {
- const auto& evalOrder = mEvalOrderMap[ind];
- NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size();
-
- // Initialize EvalInfo of each RandomVariable.
- for (auto& var : evalOrder) {
- if (context.find(var) == context.end()) context.emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var, &context);
- }
-
- // Dispatch to a different algorithm according to the size of the search range.
- bool success;
- uint64_t numCombinations = getNumCombinations(evalOrder);
- if (numCombinations <= kMaxNumCombinationsWithBruteForce) {
- success = evalSubnetWithBruteForce(evalOrder, &context);
- } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) {
- success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context);
- } else {
- success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context);
- }
- if (!success) return false;
- }
- dirtySubnets.clear();
- if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
- }
- // On a successful evaluation, update the RandomVariables from the EvalContext.
- for (auto& pair : context) pair.second.updateRange();
- mTimestamp = getGlobalTime();
- NN_FUZZER_LOG << "Finish range evaluation";
- return true;
-}
-
-static void unsetEqual(const RandomVariableNode& node) {
- if (node == nullptr) return;
- NN_FUZZER_LOG << "Unset equality of var" << node->index;
- auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) {
- return ptr.lock() == node;
- };
- RandomVariableNode parent1 = node->parent1, parent2 = node->parent2;
- parent1->children.erase(
- std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual));
- node->parent1 = nullptr;
- if (parent2 != nullptr) {
- // For Equal.
- parent2->children.erase(
- std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual));
- node->parent2 = nullptr;
- } else {
- // For UnaryEqual.
- node->type = RandomVariableType::FREE;
- node->op = nullptr;
- }
-}
-
-// A class to revert all the changes made to RandomVariableNetwork since the Reverter object is
-// constructed. Only used when setEqualIfCompatible finds the operands incompatible.
-class RandomVariableNetwork::Reverter {
- public:
- // Take a snapshot of RandomVariableNetwork when Reverter is constructed.
- Reverter() : mSnapshot(*RandomVariableNetwork::get()) {}
- // Add constraint (Equal) nodes to the reverter.
- void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); }
- void revert() {
- NN_FUZZER_LOG << "Revert RandomVariableNetwork";
- // Release the constraints.
- for (const auto& node : mEqualNodes) unsetEqual(node);
- // Reset all member variables.
- *RandomVariableNetwork::get() = std::move(mSnapshot);
- }
-
- private:
- Reverter(const Reverter&) = delete;
- Reverter& operator=(const Reverter&) = delete;
- RandomVariableNetwork mSnapshot;
- std::vector<RandomVariableNode> mEqualNodes;
-};
-
-bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs,
- const std::vector<RandomVariable>& rhs) {
- NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {"
- << joinStr(", ", rhs) << "}";
- if (lhs.size() != rhs.size()) return false;
- Reverter reverter;
- bool result = true;
- for (size_t i = 0; i < lhs.size(); i++) {
- auto node = lhs[i].setEqual(rhs[i]).get();
- reverter.addNode(node);
- // Terminate early if there is no common choice between the two ranges.
- if (node != nullptr && node->range.empty()) result = false;
- }
- result = result && evalRange();
- if (!result) reverter.revert();
- NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]");
- return result;
-}
-
-bool RandomVariableNetwork::freeze() {
- NN_FUZZER_LOG << "Freeze the random network";
- if (!evalRange()) return false;
-
- std::vector<RandomVariableNode> nodes;
- for (const auto& pair : mEvalOrderMap) {
- // Find all FREE RandomVariables in the subnet.
- for (const auto& var : pair.second) {
- if (var->type == RandomVariableType::FREE) nodes.push_back(var);
- }
- }
-
- // Randomly shuffle the order for more uniform randomness.
- randomShuffle(&nodes);
-
- // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable.
- // TODO: Might be able to optimize this.
- for (const auto& var : nodes) {
- if (var->type != RandomVariableType::FREE) continue;
- size_t size = var->range.size();
- NN_FUZZER_LOG << "Freeze " << toString(var);
- var->freeze();
- NN_FUZZER_LOG << " " << toString(var);
- // There is no need to re-evaluate if the FREE RandomVariable has only one choice.
- if (size > 1) {
- var->updateTimestamp();
- if (!evalRange()) {
- NN_FUZZER_LOG << "Freeze failed at " << toString(var);
- return false;
- }
- }
- }
- NN_FUZZER_LOG << "Finish freezing the random network";
- return true;
-}
-
-} // namespace fuzzing_test
-} // namespace nn
-} // namespace android
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RandomVariable.h"
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "RandomGraphGeneratorUtils.h"
+
+namespace android {
+namespace nn {
+namespace fuzzing_test {
+
+unsigned int RandomVariableBase::globalIndex = 0;
+int RandomVariable::defaultValue = 10;
+
+RandomVariableBase::RandomVariableBase(int value)
+ : index(globalIndex++),
+ type(RandomVariableType::CONST),
+ range(value),
+ value(value),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+RandomVariableBase::RandomVariableBase(int lower, int upper)
+ : index(globalIndex++),
+ type(RandomVariableType::FREE),
+ range(lower, upper),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+RandomVariableBase::RandomVariableBase(const std::vector<int>& choices)
+ : index(globalIndex++),
+ type(RandomVariableType::FREE),
+ range(choices),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs,
+ const std::shared_ptr<const IRandomVariableOp>& op)
+ : index(globalIndex++),
+ type(RandomVariableType::OP),
+ range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)),
+ op(op),
+ parent1(lhs),
+ parent2(rhs),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+void RandomVariableRange::setRange(int lower, int upper) {
+ // kInvalidValue indicates an unbounded limit.
+ auto head = lower == kInvalidValue ? mChoices.begin()
+ : std::lower_bound(mChoices.begin(), mChoices.end(), lower);
+ auto tail = upper == kInvalidValue ? mChoices.end()
+ : std::upper_bound(mChoices.begin(), mChoices.end(), upper);
+ NN_FUZZER_CHECK(head <= tail) << "Invalid range!";
+ if (head != mChoices.begin() || tail != mChoices.end()) {
+ mChoices = std::vector<int>(head, tail);
+ }
+}
+
+int RandomVariableRange::toConst() {
+ if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)};
+ return mChoices[0];
+}
+
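+// Computes the intersection of the choices of two ranges; mChoices is assumed to be kept
+// sorted, so std::set_intersection applies directly.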
+RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) {
+ std::vector<int> result(lhs.size() + rhs.size());
+ auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(),
+ rhs.mChoices.end(), result.begin());
+ result.resize(it - result.begin());
+ return RandomVariableRange(std::move(result));
+}
+
+void RandomVariableBase::freeze() {
+ if (type == RandomVariableType::CONST) return;
+ value = range.toConst();
+ type = RandomVariableType::CONST;
+}
+
+int RandomVariableBase::getValue() const {
+ switch (type) {
+ case RandomVariableType::CONST:
+ return value;
+ case RandomVariableType::OP:
+ return op->eval(parent1->getValue(), parent2 == nullptr ? 0 : parent2->getValue());
+ default:
+ NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index;
+ return 0;
+ }
+}
+
+void RandomVariableBase::updateTimestamp() {
+ timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp;
+}
+
+RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) {
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) {
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(const std::vector<int>& choices)
+ : mVar(new RandomVariableBase(choices)) {
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(RandomVariableType type)
+ : mVar(new RandomVariableBase(1, defaultValue)) {
+ NN_FUZZER_CHECK(type == RandomVariableType::FREE);
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs,
+ const std::shared_ptr<const IRandomVariableOp>& op)
+ : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) {
+ // Make a copy if the parent is CONST. This will resolve the fake dependency problem.
+ if (mVar->parent1->type == RandomVariableType::CONST) {
+ mVar->parent1 = RandomVariable(mVar->parent1->value).get();
+ }
+ if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) {
+ mVar->parent2 = RandomVariable(mVar->parent2->value).get();
+ }
+ mVar->parent1->children.push_back(mVar);
+ if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar);
+ RandomVariableNetwork::get()->add(mVar);
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+}
+
+void RandomVariable::setRange(int lower, int upper) {
+ NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr";
+ NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index;
+ size_t oldSize = mVar->range.size();
+ mVar->range.setRange(lower, upper);
+ // Only update the timestamp if the range is *indeed* narrowed down.
+ if (mVar->range.size() != oldSize) mVar->updateTimestamp();
+}
+
+RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const {
+ std::set<int> st;
+ for (auto i : lhs.getChoices()) {
+ for (auto j : rhs.getChoices()) {
+ int res = this->eval(i, j);
+ if (res > kMaxValue || res < -kMaxValue) continue;
+ st.insert(res);
+ }
+ }
+ return RandomVariableRange(st);
+}
+
+// Check if the range contains exactly all values in [min, max].
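+// For example, {2, 3, 4, 5} is continuous, while {2, 4, 5} is not.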
+static inline bool isContinuous(const std::set<int>* range) {
+ return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size());
+}
+
+// Fill the set with a range of values specified by [lower, upper].
+static inline void fillRange(std::set<int>* range, int lower, int upper) {
+ for (int i = lower; i <= upper; i++) range->insert(i);
+}
+
+// The slowest algorithm: iterate through every combination of the parents and save the valid pairs.
+void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const {
+ // Avoid the binary search if the child is a closed range.
+ bool isChildInContinuous = isContinuous(childIn);
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ for (auto i : *parent1In) {
+ bool valid = false;
+ for (auto j : *parent2In) {
+ int res = this->eval(i, j);
+ // Avoid the binary search if obviously out of range.
+ if (res > child.second || res < child.first) continue;
+ if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+ parent2Out->insert(j);
+ childOut->insert(res);
+ valid = true;
+ }
+ }
+ if (valid) parent1Out->insert(i);
+ }
+}
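+// Note: this fallback runs in O(|parent1In| * |parent2In|), skipping the per-result set lookup
+// when the child range has no holes.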
+
+// A helper template to make a class into a Singleton.
+template <class T>
+class Singleton : public T {
+ public:
+ static const std::shared_ptr<const T>& get() {
+ static std::shared_ptr<const T> instance(new T);
+ return instance;
+ }
+};
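+// Usage: Singleton<Addition>::get() always returns the same shared Addition instance.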
+
+// A set of operations that only compute on a single input value.
+class IUnaryOp : public IRandomVariableOp {
+ public:
+ using IRandomVariableOp::eval;
+ virtual int eval(int val) const = 0;
+ virtual int eval(int lhs, int) const override { return eval(lhs); }
+ // The slowest algorithm: iterate through every value of the parent and save the valid ones.
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ NN_FUZZER_CHECK(parent2In == nullptr);
+ NN_FUZZER_CHECK(parent2Out == nullptr);
+ bool isChildInContinuous = isContinuous(childIn);
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ for (auto i : *parent1In) {
+ int res = this->eval(i);
+ if (res > child.second || res < child.first) continue;
+ if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+ parent1Out->insert(i);
+ childOut->insert(res);
+ }
+ }
+ }
+};
+
+// A set of operations that only check conditional constraints.
+class IConstraintOp : public IRandomVariableOp {
+ public:
+ using IRandomVariableOp::eval;
+ virtual bool check(int lhs, int rhs) const = 0;
+ virtual int eval(int lhs, int rhs) const override {
+ return check(lhs, rhs) ? 0 : kInvalidValue;
+ }
+ // The range for a constraint op is always {0}.
+ virtual RandomVariableRange getInitRange(const RandomVariableRange&,
+ const RandomVariableRange&) const override {
+ return RandomVariableRange(0);
+ }
+ // The slowest algorithm:
+ // iterate through every combination of the parents and save the valid pairs.
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out,
+ std::set<int>* childOut) const override {
+ for (auto i : *parent1In) {
+ bool valid = false;
+ for (auto j : *parent2In) {
+ if (this->check(i, j)) {
+ parent2Out->insert(j);
+ valid = true;
+ }
+ }
+ if (valid) parent1Out->insert(i);
+ }
+ if (!parent1Out->empty()) childOut->insert(0);
+ }
+};
+
+class Addition : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return lhs + rhs; }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max());
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+ // For parents and child with closed (continuous) ranges, the output ranges can be
+ // computed directly without iteration.
+ std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
+ std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+
+ // From ranges for parent, evaluate range for child.
+ // [a, b] + [c, d] -> [a + c, b + d]
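+ // e.g. [1, 3] + [2, 5] -> [3, 8].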
+ fillRange(childOut, std::max(child.first, parent1.first + parent2.first),
+ std::min(child.second, parent1.second + parent2.second));
+
+ // From ranges for child and one parent, evaluate range for another parent.
+ // [a, b] - [c, d] -> [a - d, b - c]
+ fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second),
+ std::min(parent1.second, child.second - parent2.first));
+ fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second),
+ std::min(parent2.second, child.second - parent1.first));
+ }
+ }
+ virtual const char* getName() const override { return "ADD"; }
+};
+
+class Subtraction : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return lhs - rhs; }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min());
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+ // Similar algorithm to Addition.
+ std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
+ std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ fillRange(childOut, std::max(child.first, parent1.first - parent2.second),
+ std::min(child.second, parent1.second - parent2.first));
+ fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first),
+ std::min(parent1.second, child.second + parent2.second));
+ fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second),
+ std::min(parent2.second, parent1.second - child.first));
+ }
+ }
+ virtual const char* getName() const override { return "SUB"; }
+};
+
+class Multiplication : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return lhs * rhs; }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ if (lhs.min() < 0 || rhs.min() < 0) {
+ return IRandomVariableOp::getInitRange(lhs, rhs);
+ } else {
+ int lower = std::min(lhs.min() * rhs.min(), kMaxValue);
+ int upper = std::min(lhs.max() * rhs.max(), kMaxValue);
+ return RandomVariableRange(lower, upper);
+ }
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+ bool isChildInContinuous = isContinuous(childIn);
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ for (auto i : *parent1In) {
+ bool valid = false;
+ for (auto j : *parent2In) {
+ int res = this->eval(i, j);
+ // Since MUL increases monotonically in each (non-negative) operand, break the
+ // loop once the result exceeds the upper limit.
+ if (res > child.second) break;
+ if (res < child.first) continue;
+ if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+ valid = true;
+ parent2Out->insert(j);
+ childOut->insert(res);
+ }
+ }
+ if (valid) parent1Out->insert(i);
+ }
+ }
+ }
+ virtual const char* getName() const override { return "MUL"; }
+};
+
+class Division : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override {
+ return rhs == 0 ? kInvalidValue : lhs / rhs;
+ }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ if (lhs.min() < 0 || rhs.min() <= 0) {
+ return IRandomVariableOp::getInitRange(lhs, rhs);
+ } else {
+ return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min());
+ }
+ }
+ virtual const char* getName() const override { return "DIV"; }
+};
+
+class ExactDivision : public Division {
+ public:
+ virtual int eval(int lhs, int rhs) const override {
+ return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs;
+ }
+ virtual const char* getName() const override { return "EXACT_DIV"; }
+};
+
+class Modulo : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override {
+ return rhs == 0 ? kInvalidValue : lhs % rhs;
+ }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange&,
+ const RandomVariableRange& rhs) const override {
+ return RandomVariableRange(0, rhs.max());
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (*childIn->begin() != 0 || childIn->size() != 1u) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+ // For the special case where the child is a const 0, it is faster to evaluate the
+ // ranges of the parents separately.
+
+ // Evaluate parent1 directly.
+ for (auto i : *parent1In) {
+ for (auto j : *parent2In) {
+ if (i % j == 0) {
+ parent1Out->insert(i);
+ break;
+ }
+ }
+ }
+ // Evaluate parent2: check whether a multiple of each parent2 value exists in parent1.
+ int parent1Max = *parent1In->rbegin();
+ for (auto i : *parent2In) {
+ int jMax = parent1Max / i;
+ for (int j = 1; j <= jMax; j++) {
+ if (parent1In->find(i * j) != parent1In->end()) {
+ parent2Out->insert(i);
+ break;
+ }
+ }
+ }
+ if (!parent1Out->empty()) childOut->insert(0);
+ }
+ }
+ virtual const char* getName() const override { return "MOD"; }
+};
+
+class Maximum : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); }
+ virtual const char* getName() const override { return "MAX"; }
+};
+
+class Minimum : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); }
+ virtual const char* getName() const override { return "MIN"; }
+};
+
+class Square : public IUnaryOp {
+ public:
+ virtual int eval(int val) const override { return val * val; }
+ virtual const char* getName() const override { return "SQUARE"; }
+};
+
+class UnaryEqual : public IUnaryOp {
+ public:
+ virtual int eval(int val) const override { return val; }
+ virtual const char* getName() const override { return "UNARY_EQUAL"; }
+};
+
+class Equal : public IConstraintOp {
+ public:
+ virtual bool check(int lhs, int rhs) const override { return lhs == rhs; }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0);
+ // The intersection of two sets can be found in O(n).
+ std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(),
+ parent2In->end(), std::inserter(*parent1Out, parent1Out->begin()));
+ *parent2Out = *parent1Out;
+ childOut->insert(0);
+ }
+ virtual const char* getName() const override { return "EQUAL"; }
+};
+
+class GreaterThan : public IConstraintOp {
+ public:
+ virtual bool check(int lhs, int rhs) const override { return lhs > rhs; }
+ virtual const char* getName() const override { return "GREATER_THAN"; }
+};
+
+class GreaterEqual : public IConstraintOp {
+ public:
+ virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; }
+ virtual const char* getName() const override { return "GREATER_EQUAL"; }
+};
+
+class FloatMultiplication : public IUnaryOp {
+ public:
+ FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {}
+ virtual int eval(int val) const override {
+ return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand));
+ }
+ virtual const char* getName() const override { return "MUL_FLOAT"; }
+
+ private:
+ float mMultiplicand;
+};
+
+// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes.
+// Since there must be at most one edge between two RandomVariableNodes, we have to do something
+// special when both sides are referring to the same node.
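+// For example, (v + v) is built as (v * 2), and (v * v) as square(v).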
+
+RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get())
+ : RandomVariable(lhs, rhs, Singleton<Addition>::get());
+}
+RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(0)
+ : RandomVariable(lhs, rhs, Singleton<Subtraction>::get());
+}
+RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(lhs, RandomVariable(), Singleton<Square>::get())
+ : RandomVariable(lhs, rhs, Singleton<Multiplication>::get());
+}
+RandomVariable operator*(const RandomVariable& lhs, const float& rhs) {
+ return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs));
+}
+RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(1)
+ : RandomVariable(lhs, rhs, Singleton<Division>::get());
+}
+RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(0)
+ : RandomVariable(lhs, rhs, Singleton<Modulo>::get());
+}
+RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get());
+}
+RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get());
+}
+
+RandomVariable RandomVariable::exactDiv(const RandomVariable& other) {
+ return mVar == other.get() ? RandomVariable(1)
+ : RandomVariable(*this, other, Singleton<ExactDivision>::get());
+}
+
+RandomVariable RandomVariable::setEqual(const RandomVariable& other) const {
+ RandomVariableNode node1 = mVar, node2 = other.get();
+ NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index;
+
+ // Do not setEqual on the same pair twice.
+ if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) ||
+ (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) {
+ NN_FUZZER_LOG << "Already equal. Return.";
+ return RandomVariable();
+ }
+
+ // If possible, always try UnaryEqual first to reduce the search space.
+ // UnaryEqual can be used if node B is FREE and is evaluated later than node A.
+ // TODO: Reduce code duplication.
+ if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) {
+ NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index;
+ node2->type = RandomVariableType::OP;
+ node2->parent1 = node1;
+ node2->op = Singleton<UnaryEqual>::get();
+ node1->children.push_back(node2);
+ RandomVariableNetwork::get()->join(node1, node2);
+ node1->updateTimestamp();
+ return other;
+ }
+ if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) {
+ NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index;
+ node1->type = RandomVariableType::OP;
+ node1->parent1 = node2;
+ node1->op = Singleton<UnaryEqual>::get();
+ node2->children.push_back(node1);
+ RandomVariableNetwork::get()->join(node2, node1);
+ node1->updateTimestamp();
+ return *this;
+ }
+ return RandomVariable(*this, other, Singleton<Equal>::get());
+}
+
+RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const {
+ NN_FUZZER_CHECK(mVar != other.get());
+ return RandomVariable(*this, other, Singleton<GreaterThan>::get());
+}
+RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const {
+ return mVar == other.get() ? *this
+ : RandomVariable(*this, other, Singleton<GreaterEqual>::get());
+}
+
+void DisjointNetwork::add(const RandomVariableNode& var) {
+ // Find the subnet index of the parents and decide the index for var.
+ int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1];
+ int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2];
+ int ind = join(ind1, ind2);
+ // If no parent, put it into a new subnet component.
+ if (ind == -1) ind = mNextIndex++;
+ NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind;
+ mIndexMap[var] = ind;
+ mEvalOrderMap[ind].push_back(var);
+}
+
+int DisjointNetwork::join(int ind1, int ind2) {
+ if (ind1 == -1) return ind2;
+ if (ind2 == -1) return ind1;
+ if (ind1 == ind2) return ind1;
+ NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2;
+ auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2];
+ // Append every node in ind2 to the end of ind1.
+ for (const auto& var : order2) {
+ order1.push_back(var);
+ mIndexMap[var] = ind1;
+ }
+ // Remove ind2 from mEvalOrderMap.
+ mEvalOrderMap.erase(mEvalOrderMap.find(ind2));
+ return ind1;
+}
+
+RandomVariableNetwork* RandomVariableNetwork::get() {
+ static RandomVariableNetwork instance;
+ return &instance;
+}
+
+void RandomVariableNetwork::initialize(int defaultValue) {
+ RandomVariableBase::globalIndex = 0;
+ RandomVariable::defaultValue = defaultValue;
+ mIndexMap.clear();
+ mEvalOrderMap.clear();
+ mDimProd.clear();
+ mNextIndex = 0;
+ mGlobalTime = 0;
+ mTimestamp = -1;
+}
+
+bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1,
+ const RandomVariableNode& node2) {
+ if (node2->type != RandomVariableType::FREE) return false;
+ int ind1 = mIndexMap[node1];
+ // node2 is in a different subnet.
+ if (ind1 != mIndexMap[node2]) return true;
+ for (const auto& node : mEvalOrderMap[ind1]) {
+ if (node == node2) return false;
+ // node2 is in the same subnet but evaluated later than node1.
+ if (node == node1) return true;
+ }
+ NN_FUZZER_CHECK(false) << "Code executed in non-reachable region.";
+ return false;
+}
+
+struct EvalInfo {
+ // The RandomVariableNode that this EvalInfo is associated with.
+ // var->value is the current value during evaluation.
+ RandomVariableNode var;
+
+ // The RandomVariable value is staged when a valid combination is found.
+ std::set<int> staging;
+
+ // The staging values are committed after a subnet evaluation.
+ std::set<int> committed;
+
+ // Keeps track of the latest timestamp at which committed was updated.
+ int timestamp;
+
+ // For evalSubnetWithLocalNetwork.
+ RandomVariableType originalType;
+
+ // eval() should only be invoked on an OP RandomVariable.
+ bool eval() {
+ NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
+ var->value = var->op->eval(var->parent1->value,
+ var->parent2 == nullptr ? 0 : var->parent2->value);
+ if (var->value == kInvalidValue) return false;
+ return committed.find(var->value) != committed.end();
+ }
+ void stage() { staging.insert(var->value); }
+ void commit() {
+ // Only update committed and timestamp if the range is *indeed* changed.
+ if (staging.size() != committed.size()) {
+ committed = std::move(staging);
+ timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ }
+ staging.clear();
+ }
+ void updateRange() {
+ // Only update range and timestamp if the range is *indeed* changed.
+ if (committed.size() != var->range.size()) {
+ var->range = RandomVariableRange(committed);
+ var->timestamp = timestamp;
+ }
+ committed.clear();
+ }
+
+ EvalInfo(const RandomVariableNode& var)
+ : var(var),
+ committed(var->range.getChoices().begin(), var->range.getChoices().end()),
+ timestamp(var->timestamp) {}
+};
+using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>;
+
+// For logging only.
+inline std::string toString(const RandomVariableNode& var, EvalContext* context) {
+ std::stringstream ss;
+ ss << "var" << var->index << " = ";
+ const auto& committed = context->at(var).committed;
+ switch (var->type) {
+ case RandomVariableType::FREE:
+ ss << "FREE ["
+ << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]";
+ break;
+ case RandomVariableType::CONST:
+ ss << "CONST " << var->value;
+ break;
+ case RandomVariableType::OP:
+ ss << "var" << var->parent1->index << " " << var->op->getName();
+ if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
+ ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end()))
+ << "]";
+ break;
+ default:
+ NN_FUZZER_CHECK(false);
+ }
+ ss << ", timestamp = " << context->at(var).timestamp;
+ return ss.str();
+}
+
+// Check if the subnet needs to be re-evaluated by comparing the timestamps.
+static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime,
+ EvalContext* context = nullptr) {
+ for (const auto& var : evalOrder) {
+ int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp;
+ // If we find a node that has been modified since the last evaluation, the subnet needs
+ // to be re-evaluated.
+ if (timestamp > subnetTime) return true;
+ }
+ return false;
+}
+
+// Helper function to evaluate the subnet recursively.
+// Iterate through all combinations of the FREE RandomVariables' choices.
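+// The recursion enumerates the Cartesian product of the committed choices of all FREE
+// variables, pruning a branch as soon as an OP node evaluates to an invalid or uncommitted value.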
+static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) {
+ if (i == evalOrder.size()) {
+ // Reached the end of the evaluation; found a valid combination.
+ for (auto& var : evalOrder) context->at(var).stage();
+ return;
+ }
+ const auto& var = evalOrder[i];
+ if (var->type == RandomVariableType::FREE) {
+ // For FREE RandomVariable, iterate through all valid choices.
+ for (int val : context->at(var).committed) {
+ var->value = val;
+ evalSubnetHelper(evalOrder, context, i + 1);
+ }
+ return;
+ } else if (var->type == RandomVariableType::OP) {
+ // For OP RandomVariable, evaluate from parents and terminate if the result is invalid.
+ if (!context->at(var).eval()) return;
+ }
+ evalSubnetHelper(evalOrder, context, i + 1);
+}
+
+// Check if the subnet has exactly one OP RandomVariable.
+static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) {
+ int numOp = 0;
+ for (const auto& var : evalOrder) {
+ if (var->type == RandomVariableType::OP) numOp++;
+ if (numOp > 1) return false;
+ }
+ return numOp != 0;
+}
+
+// Evaluate with a potentially faster approach provided by IRandomVariableOp.
+static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder,
+ EvalContext* context) {
+ NN_FUZZER_LOG << "Identified as single op subnet";
+ const auto& var = evalOrder.back();
+ NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
+ var->op->eval(&context->at(var->parent1).committed,
+ var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed,
+ &context->at(var).committed, &context->at(var->parent1).staging,
+ var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging,
+ &context->at(var).staging);
+}
+
+// Compute the number of combinations of FREE RandomVariable choices, capped at kLimit.
+static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder,
+ EvalContext* context = nullptr) {
+ constexpr uint64_t kLimit = 1e8;
+ uint64_t numCombinations = 1;
+ for (const auto& var : evalOrder) {
+ if (var->type == RandomVariableType::FREE) {
+ size_t size =
+ context == nullptr ? var->range.size() : context->at(var).committed.size();
+ numCombinations *= size;
+ // To prevent overflow.
+ if (numCombinations > kLimit) return kLimit;
+ }
+ }
+ return numCombinations;
+}
+
+// Evaluate the subnet recursively. Returns false if the number of combinations of FREE
+// RandomVariables exceeds the threshold kMaxNumCombinations.
+static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) {
+ constexpr uint64_t kMaxNumCombinations = 1e7;
+ NN_FUZZER_LOG << "Evaluate with brute force";
+ if (isSingleOpSubnet(evalOrder)) {
+ // If the subnet has only a single OP, dispatch to a faster evaluation.
+ evalSubnetSingleOpHelper(evalOrder, context);
+ } else {
+ if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) {
+ NN_FUZZER_LOG << "Terminate the evaluation because of large search range";
+ std::cout << "[ ] Terminate the evaluation because of large search range"
+ << std::endl;
+ return false;
+ }
+ evalSubnetHelper(evalOrder, context);
+ }
+ for (auto& var : evalOrder) {
+ if (context->at(var).staging.empty()) {
+ NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context);
+ return false;
+ }
+ context->at(var).commit();
+ }
+ return true;
+}
+
+struct LocalNetwork {
+ EvaluationOrder evalOrder;
+ std::vector<RandomVariableNode> bridgeNodes;
+ int timestamp = 0;
+
+ bool eval(EvalContext* context) {
+ NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp;
+ // Temporarily treat bridge nodes as FREE RandomVariables.
+ for (const auto& var : bridgeNodes) {
+ context->at(var).originalType = var->type;
+ var->type = RandomVariableType::FREE;
+ }
+ for (const auto& var : evalOrder) {
+ context->at(var).staging.clear();
+ NN_FUZZER_LOG << " - " << toString(var, context);
+ }
+ bool success = evalSubnetWithBruteForce(evalOrder, context);
+ // Reset the RandomVariable types for bridge nodes.
+ for (const auto& var : bridgeNodes) var->type = context->at(var).originalType;
+ return success;
+ }
+};
+
+// Partition the network further into LocalNetworks based on the results of the bridge
+// annotation algorithm.
+class GraphPartitioner : public DisjointNetwork {
+ public:
+ GraphPartitioner() = default;
+
+ std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) {
+ annotateBridge(evalOrder);
+ for (const auto& var : evalOrder) add(var);
+ return get(timestamp);
+ }
+
+ private:
+ GraphPartitioner(const GraphPartitioner&) = delete;
+ GraphPartitioner& operator=(const GraphPartitioner&) = delete;
+
+ // Find the parent-child relationship between var1 and var2, and reset the bridge.
+ void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) {
+ if (var1->parent1 == var2) {
+ mBridgeInfo[var1].isParent1Bridge = true;
+ } else if (var1->parent2 == var2) {
+ mBridgeInfo[var1].isParent2Bridge = true;
+ } else {
+ setBridgeFlag(var2, var1);
+ }
+ }
+
+ // Annotate the bridges with DFS -- an edge [u, v] is a bridge if none of u's ancestors is
+ // reachable from a node in the subtree of v. The complexity is O(V + E).
+ // discoveryTime: The timestamp at which a node is first visited.
+ // lowTime: The minimum discovery time of all nodes reachable from the subtree of the node.
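+ // Illustrative example: in the undirected graph v1 - v2, v2 - v3, v3 - v4, v4 - v2, the
+ // cycle {v2, v3, v4} survives the removal of any one of its edges, so [v1, v2] is the only
+ // bridge; the DFS detects it because lowTime(v2) > discoveryTime(v1).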
+ void annotateBridgeHelper(const RandomVariableNode& var, int* time) {
+ mBridgeInfo[var].visited = true;
+ mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++;
+
+ // The algorithm operates on an undirected graph. First find all adjacent nodes.
+ auto adj = var->children;
+ if (var->parent1 != nullptr) adj.push_back(var->parent1);
+ if (var->parent2 != nullptr) adj.push_back(var->parent2);
+
+ for (const auto& weakChild : adj) {
+ auto child = weakChild.lock();
+ NN_FUZZER_CHECK(child != nullptr);
+ if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue;
+ if (!mBridgeInfo[child].visited) {
+ mBridgeInfo[child].parent = var;
+ annotateBridgeHelper(child, time);
+
+ // If no node in the subtree of child is connected to any ancestor of var,
+ // then the edge [var, child] is a bridge.
+ mBridgeInfo[var].lowTime =
+ std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime);
+ if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime)
+ setBridgeFlag(var, child);
+ } else if (mBridgeInfo[var].parent != child) {
+ mBridgeInfo[var].lowTime =
+ std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime);
+ }
+ }
+ }
+
+ // Find all bridges in the subnet with DFS.
+ void annotateBridge(const EvaluationOrder& evalOrder) {
+ for (const auto& var : evalOrder) mBridgeInfo[var];
+ int time = 0;
+ for (const auto& var : evalOrder) {
+ if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time);
+ }
+ }
+
+ // Re-partition the network by treating bridges as removed edges.
+ void add(const RandomVariableNode& var) {
+ auto parent1 = var->parent1;
+ auto parent2 = var->parent2;
+ if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr;
+ if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr;
+ DisjointNetwork::add(var);
+ var->parent1 = parent1;
+ var->parent2 = parent2;
+ }
+
+ // Add bridge nodes to the local networks and drop single-node subnets.
+ std::vector<LocalNetwork> get(int timestamp) {
+ std::vector<LocalNetwork> res;
+ for (auto& pair : mEvalOrderMap) {
+ // We do not need to evaluate a subnet with only a single node.
+ if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue;
+ res.emplace_back();
+ for (const auto& var : pair.second) {
+ if (mBridgeInfo[var].isParent1Bridge) {
+ res.back().evalOrder.push_back(var->parent1);
+ res.back().bridgeNodes.push_back(var->parent1);
+ }
+ if (mBridgeInfo[var].isParent2Bridge) {
+ res.back().evalOrder.push_back(var->parent2);
+ res.back().bridgeNodes.push_back(var->parent2);
+ }
+ res.back().evalOrder.push_back(var);
+ }
+ res.back().timestamp = timestamp;
+ }
+ return res;
+ }
+
+ // Bookkeeping for the bridge discovery algorithm.
+ struct BridgeInfo {
+ bool isParent1Bridge = false;
+ bool isParent2Bridge = false;
+ int discoveryTime = 0;
+ int lowTime = 0;
+ bool visited = false;
+ std::shared_ptr<RandomVariableBase> parent = nullptr;
+ };
+ std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo;
+};
+
+// Evaluate subnets repeatedly until convergence.
+// Class T_Subnet must have members evalOrder and timestamp, and a member function eval.
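+// For example, if subnets A and B share a bridge variable and a pass over A shrinks that
+// variable's range, its timestamp is bumped, so needEvaluate marks B dirty on the next pass;
+// the loop terminates once a full pass changes nothing.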
+template <class T_Subnet>
+inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) {
+ bool terminate = false;
+ while (!terminate) {
+ terminate = true;
+ for (auto& subnet : *subnets) {
+ if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) {
+ if (!subnet.eval(context)) return false;
+ subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ terminate = false;
+ }
+ }
+ }
+ return true;
+}
+
+// Evaluate the subnet by first partitioning it further into LocalNetworks.
+static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp,
+ EvalContext* context) {
+ NN_FUZZER_LOG << "Evaluate with local network";
+ auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp);
+ return evalSubnetsRepeatedly(&localNetworks, context);
+}
+
+struct LeafNetwork {
+ EvaluationOrder evalOrder;
+ int timestamp = 0;
+ LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) {
+ std::set<RandomVariableNode> visited;
+ constructorHelper(var, &visited);
+ }
+ // Construct the leaf network by recursively including parent nodes.
+ void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) {
+ if (var == nullptr || visited->find(var) != visited->end()) return;
+ constructorHelper(var->parent1, visited);
+ constructorHelper(var->parent2, visited);
+ visited->insert(var);
+ evalOrder.push_back(var);
+ }
+ bool eval(EvalContext* context) {
+ return evalSubnetWithLocalNetwork(evalOrder, timestamp, context);
+ }
+};
+
+// Evaluate the subnet with leaf networks.
+// NOTE: This algorithm only produces a correct result *most* of the time (> 99%).
+// The random graph generator is expected to retry if it fails.
+static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp,
+ EvalContext* context) {
+ NN_FUZZER_LOG << "Evaluate with leaf network";
+ // Construct leaf networks.
+ std::vector<LeafNetwork> leafNetworks;
+ for (const auto& var : evalOrder) {
+ if (var->children.empty()) {
+ NN_FUZZER_LOG << "Found leaf " << toString(var, context);
+ leafNetworks.emplace_back(var, timestamp);
+ }
+ }
+ return evalSubnetsRepeatedly(&leafNetworks, context);
+}
+
+void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) {
+ if (dims.size() <= 1) return;
+ EvaluationOrder order;
+ for (const auto& dim : dims) order.push_back(dim.get());
+ mDimProd.push_back(order);
+}
+
+bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd,
+ const std::unordered_map<RandomVariableNode, int>& indexMap,
+ EvalContext* context, std::set<int>* dirtySubnets) {
+ for (auto& evalOrder : mDimProd) {
+ NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size();
+ // Initialize EvalInfo of each RandomVariable.
+ for (auto& var : evalOrder) {
+ if (context->find(var) == context->end()) context->emplace(var, var);
+ NN_FUZZER_LOG << " - " << toString(var, context);
+ }
+
+ // Enforce the product of the dimension values to stay below kMaxValue:
+ // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...)
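+ // Illustrative numbers: with kMaxValue = 100 and committed ranges {2, 4, 8} and
+ // {3, 5, 60}, prod = 2 * 3 = 6; the first dimension keeps all values (100 / 3 = 33),
+ // while the second is capped at 100 / 2 = 50, so 60 is erased and its subnet dirtied.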
+ int prod = 1;
+ for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin());
+ for (auto& var : evalOrder) {
+ auto& committed = context->at(var).committed;
+ int maxValue = kMaxValue / (prod / *committed.begin());
+ auto it = committed.upper_bound(maxValue);
+ // var has empty range -> no solution.
+ if (it == committed.begin()) return false;
+ // The range is not modified -> continue.
+ if (it == committed.end()) continue;
+ // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation.
+ committed.erase(it, committed.end());
+ context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ dirtySubnets->insert(indexMap.at(var));
+ }
+ }
+ return true;
+}
+
+bool RandomVariableNetwork::evalRange() {
+ constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500;
+ constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5;
+ NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks";
+ EvalContext context;
+ std::set<int> dirtySubnets; // Which subnets need evaluation.
+ for (auto& pair : mEvalOrderMap) {
+ const auto& evalOrder = pair.second;
+ // Decide by timestamp whether the subnet needs evaluation -- if no range has changed since
+ // the last evaluation, then the subnet does not need re-evaluation.
+ if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue;
+ dirtySubnets.insert(pair.first);
+ }
+ if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
+
+ // Repeat until the ranges converge.
+ while (!dirtySubnets.empty()) {
+ for (int ind : dirtySubnets) {
+ const auto& evalOrder = mEvalOrderMap[ind];
+ NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size();
+
+ // Initialize EvalInfo of each RandomVariable.
+ for (auto& var : evalOrder) {
+ if (context.find(var) == context.end()) context.emplace(var, var);
+ NN_FUZZER_LOG << " - " << toString(var, &context);
+ }
+
+ // Dispatch to a different algorithm according to the size of the search range.
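+ // E.g., two FREE variables with 10 candidates each (100 combinations) use brute
+ // force, while three with 50 candidates each (125,000 combinations) exceed both
+ // limits and fall back to the leaf-network algorithm.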
+ bool success;
+ uint64_t numCombinations = getNumCombinations(evalOrder);
+ if (numCombinations <= kMaxNumCombinationsWithBruteForce) {
+ success = evalSubnetWithBruteForce(evalOrder, &context);
+ } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) {
+ success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context);
+ } else {
+ success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context);
+ }
+ if (!success) return false;
+ }
+ dirtySubnets.clear();
+ if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
+ }
+ // On a successful evaluation, update the RandomVariables from the EvalContext.
+ for (auto& pair : context) pair.second.updateRange();
+ mTimestamp = getGlobalTime();
+ NN_FUZZER_LOG << "Finish range evaluation";
+ return true;
+}
+
+static void unsetEqual(const RandomVariableNode& node) {
+ if (node == nullptr) return;
+ NN_FUZZER_LOG << "Unset equality of var" << node->index;
+ auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) {
+ return ptr.lock() == node;
+ };
+ RandomVariableNode parent1 = node->parent1, parent2 = node->parent2;
+ parent1->children.erase(
+ std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual));
+ node->parent1 = nullptr;
+ if (parent2 != nullptr) {
+ // For Equal.
+ parent2->children.erase(
+ std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual));
+ node->parent2 = nullptr;
+ } else {
+ // For UnaryEqual.
+ node->type = RandomVariableType::FREE;
+ node->op = nullptr;
+ }
+}
+
+// A class to revert all the changes made to RandomVariableNetwork since the Reverter object was
+// constructed. Only used when setEqualIfCompatible finds the ranges incompatible.
+class RandomVariableNetwork::Reverter {
+ public:
+ // Take a snapshot of RandomVariableNetwork when Reverter is constructed.
+ Reverter() : mSnapshot(*RandomVariableNetwork::get()) {}
+ // Add constraint (Equal) nodes to the reverter.
+ void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); }
+ void revert() {
+ NN_FUZZER_LOG << "Revert RandomVariableNetwork";
+ // Release the constraints.
+ for (const auto& node : mEqualNodes) unsetEqual(node);
+ // Reset all member variables.
+ *RandomVariableNetwork::get() = std::move(mSnapshot);
+ }
+
+ private:
+ Reverter(const Reverter&) = delete;
+ Reverter& operator=(const Reverter&) = delete;
+ RandomVariableNetwork mSnapshot;
+ std::vector<RandomVariableNode> mEqualNodes;
+};
+
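+// Illustrative: to unify operand shapes {2, X} and {Y, 4}, setEqual links X with 4 and Y with
+// 2; if any linked range turns out empty, or evalRange() fails, the Equal nodes are unset and
+// the snapshot is restored, leaving both shapes untouched.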
+bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs,
+ const std::vector<RandomVariable>& rhs) {
+ NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {"
+ << joinStr(", ", rhs) << "}";
+ if (lhs.size() != rhs.size()) return false;
+ Reverter reverter;
+ bool result = true;
+ for (size_t i = 0; i < lhs.size(); i++) {
+ auto node = lhs[i].setEqual(rhs[i]).get();
+ reverter.addNode(node);
+ // Terminate early if there is no common choice between the two ranges.
+ if (node != nullptr && node->range.empty()) result = false;
+ }
+ result = result && evalRange();
+ if (!result) reverter.revert();
+ NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]");
+ return result;
+}
+
+bool RandomVariableNetwork::freeze() {
+ NN_FUZZER_LOG << "Freeze the random network";
+ if (!evalRange()) return false;
+
+ std::vector<RandomVariableNode> nodes;
+ for (const auto& pair : mEvalOrderMap) {
+ // Find all FREE RandomVariables in the subnet.
+ for (const auto& var : pair.second) {
+ if (var->type == RandomVariableType::FREE) nodes.push_back(var);
+ }
+ }
+
+ // Randomly shuffle the order to make the result more uniformly random.
+ randomShuffle(&nodes);
+
+ // An inefficient algorithm that freezes and then re-evaluates for every FREE RandomVariable.
+ // TODO: Might be able to optimize this.
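+ // E.g., a FREE dimension with range {1, 2, 4} is pinned to a single value, say 2; the
+ // subsequent evalRange() propagates that choice and may shrink the ranges of dependent
+ // variables before the next variable is frozen.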
+ for (const auto& var : nodes) {
+ if (var->type != RandomVariableType::FREE) continue;
+ size_t size = var->range.size();
+ NN_FUZZER_LOG << "Freeze " << var;
+ var->freeze();
+ NN_FUZZER_LOG << " " << var;
+ // There is no need to re-evaluate if the FREE RandomVariable has only one choice.
+ if (size > 1) {
+ var->updateTimestamp();
+ if (!evalRange()) {
+ NN_FUZZER_LOG << "Freeze failed at " << var;
+ return false;
+ }
+ }
+ }
+ NN_FUZZER_LOG << "Finish freezing the random network";
+ return true;
+}
+
+} // namespace fuzzing_test
+} // namespace nn
+} // namespace android
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 2047cbe04..6e71652a9 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -41,7 +41,6 @@
#include "SampleDriverFull.h"
using android::nn::sample_driver::SampleDriverFull;
-using namespace android::nn::hal;
#endif
@@ -66,27 +65,27 @@ class TestDriverV1_1 : public V1_1::IDevice {
TestDriverV1_1()
: mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {}
static constexpr char name[] = "TestDriverV1_1";
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel(model, actualCallback);
@@ -102,19 +101,19 @@ class TestDriverV1_0 : public V1_0::IDevice {
TestDriverV1_0()
: mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {}
static constexpr char name[] = "TestDriverV1_0";
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
private:
const sp<V1_2::IDevice> mDriverV1_2;
diff --git a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
index 8fa93327f..53b5aad17 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
+++ b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -310,7 +310,7 @@ inline void defaultScalarOperandConstructor(TestOperandType dataType, uint32_t,
op->zeroPoint = 0;
break;
default:
- NN_FUZZER_CHECK(false) << "Data type " << toString(dataType)
+ NN_FUZZER_CHECK(false) << "Data type " << dataType
<< " is not supported in defaultScalarOperandConstructor.";
}
}
diff --git a/nn/tools/test_generator/test_harness/TestHarness.cpp b/nn/tools/test_generator/test_harness/TestHarness.cpp
index 56e1414fe..841eebfff 100644
--- a/nn/tools/test_generator/test_harness/TestHarness.cpp
+++ b/nn/tools/test_generator/test_harness/TestHarness.cpp
@@ -558,12 +558,12 @@ void dumpTestBufferToSpecFileHelper(const TestBuffer& buffer, bool useHexFloat,
} // namespace
-const char* toString(TestOperandType type) {
- return kOperandTypeNames[static_cast<int>(type)];
+std::ostream& operator<<(std::ostream& os, const TestOperandType& type) {
+ return os << kOperandTypeNames[static_cast<int>(type)];
}
-const char* toString(TestOperationType type) {
- return kOperationTypeNames[static_cast<int>(type)];
+std::ostream& operator<<(std::ostream& os, const TestOperationType& type) {
+ return os << kOperationTypeNames[static_cast<int>(type)];
}
// Dump a test buffer.
@@ -605,7 +605,7 @@ void SpecDumper::dumpTestBuffer(TestOperandType type, const TestBuffer& buffer,
void SpecDumper::dumpTestOperand(const TestOperand& operand, uint32_t index) {
mOs << "op" << index << " = " << getOperandClassInSpecFile(operand.lifetime) << "(\"op" << index
- << "\", [\"" << toString(operand.type) << "\", ["
+ << "\", [\"" << operand.type << "\", ["
<< join(", ", operand.dimensions, defaultToStringFunc<uint32_t>) << "]";
if (operand.scale != 0.0f || operand.zeroPoint != 0) {
mOs << ", float.fromhex(" << toHexFloatString(operand.scale) << "), " << operand.zeroPoint;
@@ -635,7 +635,7 @@ void SpecDumper::dumpTestOperand(const TestOperand& operand, uint32_t index) {
void SpecDumper::dumpTestOperation(const TestOperation& operation) {
auto toOperandName = [](uint32_t index) { return "op" + std::to_string(index); };
- mOs << "model = model.Operation(\"" << toString(operation.type) << "\", "
+ mOs << "model = model.Operation(\"" << operation.type << "\", "
<< join(", ", operation.inputs, toOperandName) << ").To("
<< join(", ", operation.outputs, toOperandName) << ")\n";
}
diff --git a/nn/tools/test_generator/test_harness/include/TestHarness.h b/nn/tools/test_generator/test_harness/include/TestHarness.h
index 0a7dce81d..be4f5625a 100644
--- a/nn/tools/test_generator/test_harness/include/TestHarness.h
+++ b/nn/tools/test_generator/test_harness/include/TestHarness.h
@@ -508,8 +508,8 @@ bool isQuantizedType(TestOperandType type);
TestModel convertQuant8AsymmOperandsToSigned(const TestModel& testModel);
-const char* toString(TestOperandType type);
-const char* toString(TestOperationType type);
+std::ostream& operator<<(std::ostream& os, const TestOperandType& type);
+std::ostream& operator<<(std::ostream& os, const TestOperationType& type);
// Dump a test model in the format of a spec file for debugging and visualization purpose.
class SpecDumper {