author     Xusong Wang <xusongw@google.com>    2020-04-09 14:52:23 -0700
committer  Xusong Wang <xusongw@google.com>    2020-04-17 17:38:50 -0700
commit     69db15d05d4f7a3c168185520cdcdb60f1d09fab (patch)
tree       facba5b5cab875aebd0795a458a856008901eb19 /nn
parent     9746e462ca0f4c2239feb28314e8857974322fda (diff)
download   ml-69db15d05d4f7a3c168185520cdcdb60f1d09fab.tar.gz
Add NNT_static internal tests for device memory allocation.
These tests use a customized IDevice to test the device memory allocation
and fallback logic. This CL also allows the runtime to dispatch device
memory allocation with dynamic shape to drivers. Additionally, this CL
fixes a bug where a failed device memory allocation returned BAD_DATA --
it should return OP_FAILED instead.

Bug: 152209365
Test: NNT_static
Change-Id: I1facb2dad345958c3b9b1bab4a9564085c382c4a
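For context, the public surface this change affects is the NNAPI memory domain
API. A minimal sketch of the client-side flow the new tests exercise --
allocating a driver-managed buffer for output 0 of a finished compilation
(the helper name allocateDeviceOutput is illustrative, not part of this CL):

    #include <android/NeuralNetworks.h>

    // Sketch: allocate a driver-managed memory for output 0 of a finished
    // compilation. With this CL, an output with unknown dimensions is also
    // dispatched to the driver; if the driver-side allocation fails and no
    // fallback is possible, the call returns ANEURALNETWORKS_OP_FAILED.
    int allocateDeviceOutput(const ANeuralNetworksCompilation* compilation,
                             ANeuralNetworksMemory** memory) {
        ANeuralNetworksMemoryDesc* desc = nullptr;
        int n = ANeuralNetworksMemoryDesc_create(&desc);
        if (n != ANEURALNETWORKS_NO_ERROR) return n;
        // Declare the intended usage: output 0 of this compilation.
        n = ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation, /*index=*/0,
                                                    /*frequency=*/1.0f);
        if (n == ANEURALNETWORKS_NO_ERROR) n = ANeuralNetworksMemoryDesc_finish(desc);
        if (n == ANEURALNETWORKS_NO_ERROR) n = ANeuralNetworksMemory_createFromDesc(desc, memory);
        ANeuralNetworksMemoryDesc_free(desc);
        return n;
    }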
Diffstat (limited to 'nn')
-rw-r--r--  nn/runtime/Memory.cpp                            17
-rw-r--r--  nn/runtime/test/Android.bp                        4
-rw-r--r--  nn/runtime/test/TestMemoryDomain.cpp            415
-rw-r--r--  nn/runtime/test/TestNeuralNetworksWrapper.h      24
-rw-r--r--  nn/runtime/test/TestRemoveDefaultArguments.cpp    4
-rw-r--r--  nn/runtime/test/fuzzing/TestRandomGraph.cpp       8
6 files changed, 450 insertions, 22 deletions
diff --git a/nn/runtime/Memory.cpp b/nn/runtime/Memory.cpp
index d9a29d470..7bfaf5562 100644
--- a/nn/runtime/Memory.cpp
+++ b/nn/runtime/Memory.cpp
@@ -478,6 +478,8 @@ int MemoryBuilder::finish() {
const auto* cb = std::get<const CompilationBuilder*>(role);
return cb->createdWithExplicitDeviceList();
});
+ const uint32_t size = TypeManager::get()->getSizeOfData(mOperand->type, mDesc.dimensions);
+ mShouldFallback &= (size != 0);
mFinished = true;
return ANEURALNETWORKS_NO_ERROR;
}
@@ -488,17 +490,9 @@ std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const {
return {ANEURALNETWORKS_BAD_STATE, nullptr};
}
- // TODO(xusongw): Does not support dynamic output shape for now.
- CHECK(mOperand.has_value());
- uint32_t size = TypeManager::get()->getSizeOfData(mOperand->type, mDesc.dimensions);
- if (size == 0) {
- LOG(ERROR)
- << "ANeuralNetworksMemory_createFromDesc -- does not support unknown dimensions.";
- return {ANEURALNETWORKS_OP_FAILED, nullptr};
- }
-
int n = ANEURALNETWORKS_OP_FAILED;
std::unique_ptr<Memory> memory;
+ CHECK(mOperand.has_value());
// Try allocating the memory on device.
if (mAllocator != nullptr) {
@@ -507,6 +501,7 @@ std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const {
// If that failed, fall back to ashmem or a BLOB mode AHWB.
if (n != ANEURALNETWORKS_NO_ERROR && mShouldFallback) {
+ const uint32_t size = TypeManager::get()->getSizeOfData(mOperand->type, mDesc.dimensions);
if (mSupportsAhwb) {
VLOG(MEMORY) << "MemoryBuilder::allocate -- fallback to BLOB mode AHWB.";
std::tie(n, memory) = MemoryRuntimeAHWB::create(size);
@@ -661,11 +656,11 @@ std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<ha
uint32_t token) {
if (buffer == nullptr) {
LOG(ERROR) << "nullptr IBuffer for device memory.";
- return {ANEURALNETWORKS_BAD_DATA, nullptr};
+ return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
if (token <= 0) {
LOG(ERROR) << "Invalid token for device memory: " << token;
- return {ANEURALNETWORKS_BAD_DATA, nullptr};
+ return {ANEURALNETWORKS_OP_FAILED, nullptr};
}
return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)};
};
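Read together, the hunks above move the size computation out of allocate():
finish() now clears mShouldFallback whenever the operand's data size is
unknown, so allocate() can always try the driver first and only falls back
for fully specified shapes. A self-contained illustration of the size rule
(sizeOfFloat32Tensor is a simplified stand-in for TypeManager::getSizeOfData(),
which also handles other operand types and extensions):

    #include <cstdint>
    #include <numeric>
    #include <vector>

    // A tensor with any unknown (0) dimension has data size 0, which is what
    // finish() uses above to disable the ashmem/AHWB fallback for dynamic shapes.
    uint32_t sizeOfFloat32Tensor(const std::vector<uint32_t>& dims) {
        const uint32_t elements = std::accumulate(
                dims.begin(), dims.end(), 1u,
                [](uint32_t a, uint32_t b) { return a * b; });
        return elements * static_cast<uint32_t>(sizeof(float));
    }

    bool shouldFallback(const std::vector<uint32_t>& dims) {
        return sizeOfFloat32Tensor(dims) != 0;
    }
    // shouldFallback({1}) == true; shouldFallback({0}) == false (dynamic shape).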
diff --git a/nn/runtime/test/Android.bp b/nn/runtime/test/Android.bp
index 40dd95f0b..8874eee69 100644
--- a/nn/runtime/test/Android.bp
+++ b/nn/runtime/test/Android.bp
@@ -126,6 +126,7 @@ cc_defaults {
"TestExecution.cpp",
"TestExtensions.cpp",
"TestIntrospectionControl.cpp",
+ "TestMemoryDomain.cpp",
"TestMemoryInternal.cpp",
"TestPartitioning.cpp",
"TestPartitioningRandom.cpp",
@@ -138,8 +139,11 @@ cc_defaults {
"Bridge.cpp",
],
static_libs: [
+ "android.hardware.neuralnetworks@1.0-adapter-helper",
+ "android.hardware.neuralnetworks@1.2-adapter-helper",
"libSampleDriver",
"libgmock",
+ "libhidladapter",
"libneuralnetworks_common",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_static",
diff --git a/nn/runtime/test/TestMemoryDomain.cpp b/nn/runtime/test/TestMemoryDomain.cpp
new file mode 100644
index 000000000..88ca2f4d8
--- /dev/null
+++ b/nn/runtime/test/TestMemoryDomain.cpp
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/hardware/neuralnetworks/1.2/ADevice.h>
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "HalInterfaces.h"
+#include "Manager.h"
+#include "Memory.h"
+#include "SampleDriver.h"
+#include "TestNeuralNetworksWrapper.h"
+
+using namespace android::nn;
+using namespace hal;
+using Result = test_wrapper::Result;
+using Type = test_wrapper::Type;
+
+namespace {
+
+// A test buffer that does nothing.
+class TestBuffer : public IBuffer {
+ public:
+ Return<ErrorStatus> copyTo(const hidl_memory&) override {
+ return ErrorStatus::DEVICE_UNAVAILABLE;
+ }
+ Return<ErrorStatus> copyFrom(const hidl_memory&, const hidl_vec<uint32_t>&) override {
+ return ErrorStatus::DEVICE_UNAVAILABLE;
+ }
+};
+
+enum class AllocateReturn { OK, BAD_TOKEN, BAD_IBUFFER, BAD_STATUS, NOT_SUPPORTED };
+
+// Print the AllocateReturn enum for better gtest failure messages.
+std::ostream& operator<<(std::ostream& os, AllocateReturn allocateReturn) {
+ switch (allocateReturn) {
+ case AllocateReturn::OK:
+ return os << "OK";
+ case AllocateReturn::BAD_IBUFFER:
+ return os << "BAD_IBUFFER";
+ case AllocateReturn::BAD_TOKEN:
+ return os << "BAD_TOKEN";
+ case AllocateReturn::BAD_STATUS:
+ return os << "BAD_STATUS";
+ case AllocateReturn::NOT_SUPPORTED:
+ return os << "NOT_SUPPORTED";
+ }
+ LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(allocateReturn);
+ return os;
+}
+
+class TestDriverLatest : public sample_driver::SampleDriver {
+ public:
+ TestDriverLatest(const char* name, std::set<OperationType> supportedOperations,
+ AllocateReturn allocateReturn)
+ : SampleDriver(name),
+ kSupportedOperations(std::move(supportedOperations)),
+ kAllocateReturn(allocateReturn) {}
+
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ android::nn::initVLogMask();
+ // Faster than the CPU.
+ const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ const Capabilities capabilities = {
+ .relaxedFloat32toFloat16PerformanceScalar = kPerf,
+ .relaxedFloat32toFloat16PerformanceTensor = kPerf,
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
+ .ifPerformance = kPerf,
+ .whilePerformance = kPerf};
+ cb(ErrorStatus::NONE, capabilities);
+ return Void();
+ }
+
+ Return<void> getSupportedOperations_1_3(const Model& model,
+ getSupportedOperations_1_3_cb cb) override {
+ // The tests will never use a referenced model.
+ CHECK(model.referenced.size() == 0);
+ std::vector<bool> supported(model.main.operations.size(), false);
+ std::transform(
+ model.main.operations.begin(), model.main.operations.end(), supported.begin(),
+ [this](const Operation& op) { return kSupportedOperations.count(op.type) > 0; });
+ cb(ErrorStatus::NONE, supported);
+ return Void();
+ }
+
+ Return<void> allocate(const BufferDesc&, const hidl_vec<sp<IPreparedModel>>&,
+ const hidl_vec<BufferRole>&, const hidl_vec<BufferRole>&,
+ allocate_cb cb) override {
+ switch (kAllocateReturn) {
+ case AllocateReturn::OK:
+ cb(ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
+ return Void();
+ case AllocateReturn::BAD_IBUFFER:
+ cb(ErrorStatus::NONE, nullptr, mValidBufferToken++);
+ return Void();
+ case AllocateReturn::BAD_TOKEN:
+ cb(ErrorStatus::NONE, new TestBuffer(), 0);
+ return Void();
+ case AllocateReturn::BAD_STATUS:
+ cb(ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
+ return Void();
+ case AllocateReturn::NOT_SUPPORTED:
+ cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ return Void();
+ }
+ LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn);
+ return Void();
+ }
+
+ private:
+ const std::set<OperationType> kSupportedOperations;
+ const AllocateReturn kAllocateReturn;
+ uint32_t mValidBufferToken = 1;
+};
+
+// Create the following model for testing.
+//
+// input0 ---+
+// +--- ADD ---> output0 ---+
+// input1 ---+ +--- MUL ---> output1 (dynamic shape)
+// +--- SUB ---> temp ---+
+// input2 ---+
+//
+test_wrapper::Model createTestModel() {
+ test_wrapper::Model model;
+ test_wrapper::OperandType tensorTypeFullySpecified(Type::TENSOR_FLOAT32, {1});
+ test_wrapper::OperandType tensorTypeDynamicShape(Type::TENSOR_FLOAT32, {0});
+ test_wrapper::OperandType actType(Type::INT32, {});
+ uint32_t input0 = model.addOperand(&tensorTypeFullySpecified);
+ uint32_t input1 = model.addOperand(&tensorTypeFullySpecified);
+ uint32_t input2 = model.addOperand(&tensorTypeFullySpecified);
+ uint32_t temp = model.addOperand(&tensorTypeFullySpecified);
+ uint32_t output0 = model.addOperand(&tensorTypeFullySpecified);
+ uint32_t output1 = model.addOperand(&tensorTypeDynamicShape);
+ uint32_t act = model.addOperand(&actType);
+ int32_t activation = 0;
+ model.setOperandValue(act, &activation, sizeof(int32_t));
+ model.addOperation(ANEURALNETWORKS_ADD, {input0, input1, act}, {output0});
+ model.addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp});
+ model.addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1});
+ model.identifyInputsAndOutputs({input0, input1, input2}, {output0, output1});
+ EXPECT_EQ(model.finish(), Result::NO_ERROR);
+ return model;
+}
+
+// Test the memory domain with the following parameters:
+// - If true, use a V1_2 driver; otherwise, use the latest version;
+// - If true, compile with an explicit device list; otherwise, compile in the default way;
+// - The return value of the allocate function.
+using MemoryDomainTestParam = std::tuple<bool, bool, AllocateReturn>;
+
+class MemoryDomainTest : public ::testing::TestWithParam<MemoryDomainTestParam> {
+ protected:
+ void SetUp() override {
+ ::testing::TestWithParam<MemoryDomainTestParam>::SetUp();
+ if (DeviceManager::get()->getUseCpuOnly()) {
+ GTEST_SKIP();
+ }
+ // Clear the device list.
+ DeviceManager::get()->forTest_setDevices({});
+ }
+
+ void TearDown() override {
+ DeviceManager::get()->forTest_reInitializeDeviceList();
+ ::testing::TestWithParam<MemoryDomainTestParam>::TearDown();
+ }
+
+ // If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED.
+ void createAndRegisterDriver(const char* name, std::set<OperationType> supportedOperations,
+ AllocateReturn allocateReturn) {
+ sp<V1_0::IDevice> driver;
+ if (kUseV1_2Driver) {
+ CHECK(allocateReturn == AllocateReturn::NOT_SUPPORTED);
+ const sp<TestDriverLatest> testDriver =
+ new TestDriverLatest(name, supportedOperations, AllocateReturn::NOT_SUPPORTED);
+ driver = new V1_2::ADevice(testDriver);
+ } else {
+ driver = new TestDriverLatest(name, std::move(supportedOperations), allocateReturn);
+ }
+ DeviceManager::get()->forTest_registerDevice(name, driver);
+ }
+
+ // If not kCompileWithExplicitDeviceList, the input argument "deviceNames" is ignored.
+ test_wrapper::Compilation createCompilation(const std::vector<std::string>& deviceNames) {
+ test_wrapper::Compilation compilation;
+ if (kCompileWithExplicitDeviceList) {
+ // Map device names to ANeuralNetworksDevice.
+ std::map<std::string, ANeuralNetworksDevice*> deviceMap;
+ uint32_t numDevices = 0;
+ EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
+ for (uint32_t i = 0; i < numDevices; i++) {
+ ANeuralNetworksDevice* device = nullptr;
+ const char* name = nullptr;
+ EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
+ deviceMap.emplace(name, device);
+ }
+ std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size());
+ std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(),
+ [&deviceMap](const std::string& name) { return deviceMap.at(name); });
+ Result result;
+ std::tie(result, compilation) =
+ test_wrapper::Compilation::createForDevices(&kModel, devices);
+ EXPECT_EQ(result, Result::NO_ERROR);
+ } else {
+ compilation = test_wrapper::Compilation(&kModel);
+ }
+ EXPECT_EQ(compilation.finish(), Result::NO_ERROR);
+ return compilation;
+ }
+
+ std::pair<int, test_wrapper::Memory> allocateDeviceMemory(
+ const test_wrapper::Compilation& compilation, const std::vector<uint32_t>& inputIndexes,
+ const std::vector<uint32_t>& outputIndexes) {
+ const auto* annCompilation = compilation.getHandle();
+ ANeuralNetworksMemoryDesc* desc = nullptr;
+ EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
+ for (uint32_t index : inputIndexes) {
+ EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, annCompilation, index, 1.0f),
+ ANEURALNETWORKS_NO_ERROR);
+ }
+ for (uint32_t index : outputIndexes) {
+ EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, annCompilation, index, 1.0f),
+ ANEURALNETWORKS_NO_ERROR);
+ }
+ EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
+
+ ANeuralNetworksMemory* memory;
+ int n = ANeuralNetworksMemory_createFromDesc(desc, &memory);
+ ANeuralNetworksMemoryDesc_free(desc);
+ return {n, test_wrapper::Memory(memory)};
+ }
+
+ const bool kUseV1_2Driver = std::get<0>(GetParam());
+ const bool kCompileWithExplicitDeviceList = std::get<1>(GetParam());
+ const AllocateReturn kAllocateReturn = std::get<2>(GetParam());
+ static const test_wrapper::Model kModel;
+};
+
+const test_wrapper::Model MemoryDomainTest::kModel = createTestModel();
+
+// Test device memory allocation on a compilation with only a single partition.
+TEST_P(MemoryDomainTest, SinglePartition) {
+ createAndRegisterDriver("test_driver",
+ {OperationType::ADD, OperationType::SUB, OperationType::MUL},
+ kAllocateReturn);
+ auto compilation = createCompilation({"test_driver"});
+ ASSERT_NE(compilation.getHandle(), nullptr);
+
+ auto [n, memory] = allocateDeviceMemory(compilation, {0}, {0});
+ if (kAllocateReturn == AllocateReturn::OK) {
+ // The memory should be backed by the IBuffer returned from the driver.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_NE(m->getIBuffer(), nullptr);
+ } else {
+ if (kCompileWithExplicitDeviceList) {
+ // Should not fall back when compiled with an explicit device list.
+ ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
+ } else {
+ // The memory should fall back to ashmem or a blob AHWB depending on the driver version.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_EQ(m->getIBuffer(), nullptr);
+ const auto& hidlMemory = m->getHidlMemory();
+ EXPECT_TRUE(hidlMemory.valid());
+ if (kUseV1_2Driver) {
+ EXPECT_EQ(hidlMemory.name(), "ashmem");
+ } else {
+ EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ }
+ }
+ }
+}
+
+// Test device memory allocation on a compilation with multiple partitions.
+TEST_P(MemoryDomainTest, MultiplePartitions) {
+ createAndRegisterDriver("test_driver_add", {OperationType::ADD}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_sub", {OperationType::SUB}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_mul", {OperationType::MUL}, kAllocateReturn);
+ auto compilation = createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"});
+ ASSERT_NE(compilation.getHandle(), nullptr);
+
+ {
+ // input0 is used in only a single partition.
+ auto [n, memory] = allocateDeviceMemory(compilation, {0}, {});
+ if (kAllocateReturn == AllocateReturn::OK) {
+ // The memory should be backed by the IBuffer returned from the driver.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_NE(m->getIBuffer(), nullptr);
+ } else {
+ if (kCompileWithExplicitDeviceList) {
+ // Should not fall back when compiled with an explicit device list.
+ ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
+ } else {
+ // The memory should fall back to ashmem or a blob AHWB depending on the driver version.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_EQ(m->getIBuffer(), nullptr);
+ const auto& hidlMemory = m->getHidlMemory();
+ EXPECT_TRUE(hidlMemory.valid());
+ if (kUseV1_2Driver) {
+ EXPECT_EQ(hidlMemory.name(), "ashmem");
+ } else {
+ EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ }
+ }
+ }
+ }
+
+ {
+ // input1 is shared by two partitions with different drivers, so the runtime will not
+ // attempt to allocate on device.
+ auto [n, memory] = allocateDeviceMemory(compilation, {1}, {});
+ if (kCompileWithExplicitDeviceList) {
+ // Should not fall back when compiled with an explicit device list.
+ ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
+ } else {
+ // The memory should fall back to ashmem or a blob AHWB depending on the driver version.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_EQ(m->getIBuffer(), nullptr);
+ const auto& hidlMemory = m->getHidlMemory();
+ EXPECT_TRUE(hidlMemory.valid());
+ if (kUseV1_2Driver) {
+ EXPECT_EQ(hidlMemory.name(), "ashmem");
+ } else {
+ EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ }
+ }
+ }
+
+ {
+ // output0 is shared by two partitions with different drivers, so the runtime will not
+ // attempt to allocate on device.
+ auto [n, memory] = allocateDeviceMemory(compilation, {}, {0});
+ if (kCompileWithExplicitDeviceList) {
+ // Should not fall back when compiled with an explicit device list.
+ ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
+ } else {
+ // The memory should fall back to ashmem or a blob AHWB depending on the driver version.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_EQ(m->getIBuffer(), nullptr);
+ const auto& hidlMemory = m->getHidlMemory();
+ EXPECT_TRUE(hidlMemory.valid());
+ if (kUseV1_2Driver) {
+ EXPECT_EQ(hidlMemory.name(), "ashmem");
+ } else {
+ EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob");
+ }
+ }
+ }
+}
+
+// Test device memory allocation with dynamic shape.
+TEST_P(MemoryDomainTest, DynamicShape) {
+ createAndRegisterDriver("test_driver",
+ {OperationType::ADD, OperationType::SUB, OperationType::MUL},
+ kAllocateReturn);
+ auto compilation = createCompilation({"test_driver"});
+ ASSERT_NE(compilation.getHandle(), nullptr);
+
+ auto [n, memory] = allocateDeviceMemory(compilation, {}, {1});
+ if (kAllocateReturn == AllocateReturn::OK) {
+ // The memory should be backed by the IBuffer returned from the driver.
+ ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
+ const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ ASSERT_NE(m, nullptr);
+ EXPECT_NE(m->getIBuffer(), nullptr);
+ } else {
+ // We do not fall back in the case of dynamic shapes.
+ ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
+ }
+}
+
+static const auto kAllocateReturnChoices =
+ testing::Values(AllocateReturn::OK, AllocateReturn::BAD_TOKEN, AllocateReturn::BAD_IBUFFER,
+ AllocateReturn::BAD_STATUS, AllocateReturn::NOT_SUPPORTED);
+
+INSTANTIATE_TEST_CASE_P(DeviceVersionLatest, MemoryDomainTest,
+ testing::Combine(testing::Values(false), testing::Bool(),
+ kAllocateReturnChoices));
+INSTANTIATE_TEST_CASE_P(DeviceVersionV1_2, MemoryDomainTest,
+ testing::Combine(testing::Values(true), testing::Bool(),
+ testing::Values(AllocateReturn::NOT_SUPPORTED)));
+
+} // namespace
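For orientation: testing::Combine expands the DeviceVersionLatest
instantiation into 2 x 5 = 10 variants (explicit device list on/off, five
allocate() outcomes) and DeviceVersionV1_2 into 2 variants. The 1.2
instantiation is pinned to NOT_SUPPORTED because IDevice::allocate() only
exists from HAL 1.3 on, so a driver wrapped in V1_2::ADevice can never hand
back a device buffer -- exactly the fallback path the ashmem expectations
above cover.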
diff --git a/nn/runtime/test/TestNeuralNetworksWrapper.h b/nn/runtime/test/TestNeuralNetworksWrapper.h
index 6df16e217..ae40121c7 100644
--- a/nn/runtime/test/TestNeuralNetworksWrapper.h
+++ b/nn/runtime/test/TestNeuralNetworksWrapper.h
@@ -23,6 +23,7 @@
#include <math.h>
#include <algorithm>
+#include <memory>
#include <optional>
#include <string>
#include <utility>
@@ -242,6 +243,21 @@ class Model {
class Compilation {
public:
+ // On success, createForDevice(s) returns Result::NO_ERROR and the created compilation;
+ // otherwise, it returns the error code and a Compilation object wrapping a nullptr handle.
+ static std::pair<Result, Compilation> createForDevice(const Model* model,
+ const ANeuralNetworksDevice* device) {
+ return createForDevices(model, {device});
+ }
+ static std::pair<Result, Compilation> createForDevices(
+ const Model* model, const std::vector<const ANeuralNetworksDevice*>& devices) {
+ ANeuralNetworksCompilation* compilation = nullptr;
+ const Result result = static_cast<Result>(ANeuralNetworksCompilation_createForDevices(
+ model->getHandle(), devices.empty() ? nullptr : devices.data(), devices.size(),
+ &compilation));
+ return {result, Compilation(compilation)};
+ }
+
Compilation(const Model* model) {
int result = ANeuralNetworksCompilation_create(model->getHandle(), &mCompilation);
if (result != 0) {
@@ -272,11 +288,6 @@ class Compilation {
return *this;
}
- Result createForDevice(const Model* model, const ANeuralNetworksDevice* device) {
- return static_cast<Result>(ANeuralNetworksCompilation_createForDevices(
- model->getHandle(), &device, 1, &mCompilation));
- }
-
Result setPreference(ExecutePreference preference) {
return static_cast<Result>(ANeuralNetworksCompilation_setPreference(
mCompilation, static_cast<int32_t>(preference)));
@@ -300,6 +311,9 @@ class Compilation {
ANeuralNetworksCompilation* getHandle() const { return mCompilation; }
protected:
+ // Takes ownership of the ANeuralNetworksCompilation.
+ Compilation(ANeuralNetworksCompilation* compilation) : mCompilation(compilation) {}
+
ANeuralNetworksCompilation* mCompilation = nullptr;
};
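The new factory returns the error code together with the wrapper, so a failed
call yields a Compilation wrapping nullptr rather than a half-initialized
object as the removed mutating createForDevice() member could. A
representative usage, assuming a finished test_wrapper::Model model and an
ANeuralNetworksDevice* device obtained via ANeuralNetworks_getDevice()
(mirroring the updated call sites below):

    auto [result, compilation] = test_wrapper::Compilation::createForDevice(&model, device);
    ASSERT_EQ(result, Result::NO_ERROR);
    ASSERT_EQ(compilation.finish(), Result::NO_ERROR);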
diff --git a/nn/runtime/test/TestRemoveDefaultArguments.cpp b/nn/runtime/test/TestRemoveDefaultArguments.cpp
index 81cd7a475..6142aa610 100644
--- a/nn/runtime/test/TestRemoveDefaultArguments.cpp
+++ b/nn/runtime/test/TestRemoveDefaultArguments.cpp
@@ -158,8 +158,8 @@ class TestRemoveDefaultArguments : public ::testing::Test {
ASSERT_TRUE(model.isValid());
ASSERT_EQ(model.finish(), Result::NO_ERROR);
- WrapperCompilation compilation;
- ASSERT_EQ(compilation.createForDevice(&model, mTestDevice), Result::NO_ERROR);
+ auto [result, compilation] = WrapperCompilation::createForDevice(&model, mTestDevice);
+ ASSERT_EQ(result, Result::NO_ERROR);
ASSERT_EQ(compilation.finish(), Result::NO_ERROR);
}
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 99d2aff80..4c5a60157 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -242,8 +242,8 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
// Create compilation for nnapi-reference.
ASSERT_TRUE(mDevices.find(kRefDeviceName) != mDevices.end());
const auto refDevice = mDevices[kRefDeviceName];
- test_wrapper::Compilation compilation;
- ASSERT_EQ(compilation.createForDevice(&model, refDevice), Result::NO_ERROR);
+ auto [result, compilation] = test_wrapper::Compilation::createForDevice(&model, refDevice);
+ ASSERT_EQ(result, Result::NO_ERROR);
ASSERT_EQ(compilation.finish(), Result::NO_ERROR);
// Create request.
@@ -293,8 +293,8 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
if (shouldSkipTest(featureLevel)) return;
// Create compilation for device.
- test_wrapper::Compilation compilation;
- ASSERT_EQ(compilation.createForDevice(model, device), Result::NO_ERROR);
+ auto [result, compilation] = test_wrapper::Compilation::createForDevice(model, device);
+ ASSERT_EQ(result, Result::NO_ERROR);
Result compileReturn = compilation.finish();
// Even if the model is fully supported, the compilation may still fail, e.g. each operation
// is supported, but the model is too big (too many operations and/or too-large constants) for