Diffstat (limited to 'nn/runtime/test')
-rw-r--r-- nn/runtime/test/Android.bp | 2
-rw-r--r-- nn/runtime/test/Bridge.cpp | 36
-rw-r--r-- nn/runtime/test/Bridge.h | 42
-rw-r--r-- nn/runtime/test/TestCompilationCaching.cpp | 169
-rw-r--r-- nn/runtime/test/TestCompliance.cpp | 19
-rw-r--r-- nn/runtime/test/TestExecution.cpp | 374
-rw-r--r-- nn/runtime/test/TestExtensions.cpp | 17
-rw-r--r-- nn/runtime/test/TestFailingDriver.cpp | 17
-rw-r--r-- nn/runtime/test/TestIntrospectionControl.cpp | 205
-rw-r--r-- nn/runtime/test/TestMemoryDomain.cpp | 120
-rw-r--r-- nn/runtime/test/TestPartitioning.cpp | 350
-rw-r--r-- nn/runtime/test/TestPartitioningRandom.cpp | 126
-rw-r--r-- nn/runtime/test/TestRemoveDefaultArguments.cpp | 9
-rw-r--r-- nn/runtime/test/TestUnspecifiedDimensions.cpp | 3
-rw-r--r-- nn/runtime/test/TestVersionedInterfaces.cpp | 541
-rw-r--r-- nn/runtime/test/android_fuzzing/Converter.cpp | 31
-rw-r--r-- nn/runtime/test/android_fuzzing/FuzzHarness.cpp | 2
-rw-r--r-- nn/runtime/test/android_fuzzing/GenerateCorpus.cpp | 4
-rw-r--r-- nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp | 46
-rw-r--r-- nn/runtime/test/fibonacci_extension/FibonacciDriver.h | 10
-rw-r--r-- nn/runtime/test/fuzzing/RandomGraphGenerator.cpp | 10
-rw-r--r-- nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h | 71
-rw-r--r-- nn/runtime/test/fuzzing/RandomVariable.cpp | 2450
-rw-r--r-- nn/runtime/test/fuzzing/TestRandomGraph.cpp | 31
-rw-r--r-- nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h | 2
25 files changed, 2372 insertions, 2315 deletions
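
The theme running through every hunk below is the removal of the blanket "using namespace hal;" in favor of explicitly versioned HIDL namespaces (V1_0 through V1_3) and android::hardware qualifiers. A minimal sketch of the pattern, using stand-in namespaces rather than the real generated HIDL headers:

    // Sketch only: stand-ins for the generated android/hardware/neuralnetworks headers.
    #include <cstdint>

    namespace V1_2 { struct Timing { uint64_t timeOnDevice; uint64_t timeInDriver; }; }
    namespace V1_3 { enum class ErrorStatus { NONE, DEVICE_UNAVAILABLE }; }

    // Before the change, "using namespace hal;" let tests write the unqualified
    // names Timing and ErrorStatus. After it, every HAL type carries its version
    // namespace explicitly:
    const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};

    bool succeeded(V1_3::ErrorStatus status) { return status == V1_3::ErrorStatus::NONE; }
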
diff --git a/nn/runtime/test/Android.bp b/nn/runtime/test/Android.bp
index 4ea388a1f..ac53b9a10 100644
--- a/nn/runtime/test/Android.bp
+++ b/nn/runtime/test/Android.bp
@@ -134,8 +134,6 @@ cc_defaults {
"fibonacci_extension/FibonacciExtensionTest.cpp",
"TestMain.cpp",
-
- "Bridge.cpp",
],
static_libs: [
"android.hardware.neuralnetworks@1.0-adapter-helper",
diff --git a/nn/runtime/test/Bridge.cpp b/nn/runtime/test/Bridge.cpp
deleted file mode 100644
index 574025620..000000000
--- a/nn/runtime/test/Bridge.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// There are name clashes between NeuralNetworksWrapper.h and
-// HalInterfaces.h. Many tests include the former; many internal
-// header files (nn/runtime/*.h) include the latter. This file
-// contains a few utilities for tests to call that trampoline to the
-// internal headers.
-
-#include "GraphDump.h"
-#include "ModelBuilder.h"
-
-namespace android {
-namespace nn {
-namespace bridge_tests {
-
-void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream) {
- ::android::nn::graphDump(name, model->makeHidlModel(), outStream);
-}
-
-} // namespace bridge_tests
-} // namespace nn
-} // namespace android
diff --git a/nn/runtime/test/Bridge.h b/nn/runtime/test/Bridge.h
deleted file mode 100644
index f067df0f3..000000000
--- a/nn/runtime/test/Bridge.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// There are name clashes between NeuralNetworksWrapper.h and
-// HalInterfaces.h. Many tests include the former; many internal
-// header files (nn/runtime/*.h) include the latter. This file
-// contains a few utilities for tests to call that trampoline to the
-// internal headers.
-
-#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
-#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
-
-#include <iostream>
-
-namespace android {
-namespace nn {
-
-class ModelBuilder;
-
-namespace bridge_tests {
-
-void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream = &std::cout);
-
-} // namespace bridge_tests
-
-} // namespace nn
-} // namespace android
-
-#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
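
For context on why Bridge.{h,cpp} could be deleted: per its own comment, the trampoline existed only because an unqualified-name clash made it awkward to include NeuralNetworksWrapper.h and HalInterfaces.h in the same translation unit. A hypothetical sketch of that kind of clash (the type names here are illustrative, not the actual colliding declarations):

    // Two headers each declare a type with the same unqualified name.
    namespace test_wrapper { enum class OperandType { TENSOR_FLOAT32 }; }
    namespace hal { enum class OperandType { TENSOR_FLOAT32 }; }

    using namespace test_wrapper;
    using namespace hal;

    // OperandType t;  // error: reference to 'OperandType' is ambiguous
    test_wrapper::OperandType t = test_wrapper::OperandType::TENSOR_FLOAT32;  // OK

With the rest of this change spelling out every HAL name explicitly, the ambiguity no longer bites, which is presumably why the trampoline had nothing left to do.
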
diff --git a/nn/runtime/test/TestCompilationCaching.cpp b/nn/runtime/test/TestCompilationCaching.cpp
index 2311685d5..1a1cdc6c2 100644
--- a/nn/runtime/test/TestCompilationCaching.cpp
+++ b/nn/runtime/test/TestCompilationCaching.cpp
@@ -31,16 +31,17 @@
#include "TestNeuralNetworksWrapper.h"
using namespace android::nn;
-using namespace hal;
-using Result = test_wrapper::Result;
+namespace hardware = android::hardware;
+using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
-const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
template <typename T>
using MQDescriptorSync = ::android::hardware::MQDescriptorSync<T>;
+using android::sp;
namespace android::hardware::neuralnetworks::V1_0 {
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+::std::ostream& operator<<(::std::ostream& os, V1_3::ErrorStatus errorStatus) {
return os << toString(errorStatus);
}
@@ -66,10 +67,10 @@ std::ostream& operator<<(std::ostream& os, HasCalledPrepareModel hasCalledPrepar
}
// Whether the driver is expected to be registered because it can pass initialization.
-bool canDeviceBeRegistered(ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
+bool canDeviceBeRegistered(V1_3::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) {
constexpr uint32_t maxNumCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
- return error == ErrorStatus::NONE && numModelCache <= maxNumCacheFiles &&
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES);
+ return error == V1_3::ErrorStatus::NONE && numModelCache <= maxNumCacheFiles &&
numDataCache <= maxNumCacheFiles;
}
@@ -94,55 +95,59 @@ class CachingDriver : public sample_driver::SampleDriver {
private:
static constexpr size_t kCacheSize = 256;
- class CachingPreparedModel : public IPreparedModel {
+ class CachingPreparedModel : public V1_3::IPreparedModel {
public:
CachingPreparedModel() = default;
- Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
- const sp<V1_0::IExecutionCallback>&) override {
+ hardware::Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+ const sp<V1_0::IExecutionCallback>&) override {
return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming,
- const sp<V1_2::IExecutionCallback>&) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request&, V1_2::MeasureTiming,
+ const sp<V1_2::IExecutionCallback>&) override {
return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming,
- const OptionalTimePoint&,
- const OptionalTimeoutDuration&,
- const sp<V1_3::IExecutionCallback>&) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request&, V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const sp<V1_3::IExecutionCallback>&) override {
return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming,
+ executeSynchronously_cb cb) override {
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
- Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming,
- const OptionalTimePoint&,
- const OptionalTimeoutDuration&,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeSynchronously_1_3_cb cb) override {
cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
- Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
- const MQDescriptorSync<V1_2::FmqRequestDatum>&,
- const MQDescriptorSync<V1_2::FmqResultDatum>&,
- configureExecutionBurst_cb cb) override {
+ hardware::Return<void> configureExecutionBurst(
+ const sp<V1_2::IBurstCallback>&, const MQDescriptorSync<V1_2::FmqRequestDatum>&,
+ const MQDescriptorSync<V1_2::FmqResultDatum>&,
+ configureExecutionBurst_cb cb) override {
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr);
- return Void();
+ return hardware::Void();
}
- Return<void> executeFenced(const hal::Request&, const hidl_vec<hidl_handle>&, MeasureTiming,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const OptionalTimeoutDuration&, executeFenced_cb cb) {
- cb(ErrorStatus::DEVICE_UNAVAILABLE, hidl_handle(nullptr), nullptr);
- return Void();
+ hardware::Return<void> executeFenced(const V1_3::Request&,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeFenced_cb cb) {
+ cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, hardware::hidl_handle(nullptr), nullptr);
+ return hardware::Void();
}
};
public:
- CachingDriver(std::string_view name, ErrorStatus errorStatusGetNumCacheFiles,
+ CachingDriver(std::string_view name, V1_3::ErrorStatus errorStatusGetNumCacheFiles,
uint32_t numModelCache, uint32_t numDataCache,
- ErrorStatus errorStatusPrepareFromCache)
+ V1_3::ErrorStatus errorStatusPrepareFromCache)
: SampleDriver(name.data()),
mErrorStatusGetNumCacheFiles(errorStatusGetNumCacheFiles),
mNumModelCache(numModelCache),
@@ -156,39 +161,40 @@ class CachingDriver : public sample_driver::SampleDriver {
~CachingDriver() override {}
// Reports faster than cpu.
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
// Reports supporting all operations.
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
// Reports according to mGetNumCacheFiles.
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
cb(convertToV1_0(mErrorStatusGetNumCacheFiles), mNumModelCache, mNumDataCache);
- return Void();
+ return hardware::Void();
}
// Generates CachingPreparedModel.
// Writes the cache entry per mCacheXData and sets mHasCalledPrepareModel.
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const Model&, ExecutionPreference, Priority, const OptionalTimePoint&,
- const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model&, V1_1::ExecutionPreference, V1_3::Priority,
+ const V1_3::OptionalTimePoint&,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& cb) override {
checkNumberOfCacheHandles(modelCacheHandle.size(), dataCacheHandle.size());
if (modelCacheHandle.size() != 0 || dataCacheHandle.size() != 0) {
@@ -204,9 +210,10 @@ class CachingDriver : public sample_driver::SampleDriver {
// Checks if the cache entry is correct, notifies error status according to
// mErrorStatusPrepareFromCache, sets mHasCalledPrepareModelFromCache.
- Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
- const OptionalTimePoint&, const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModelFromCache_1_3(
+ const V1_3::OptionalTimePoint&,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) override {
readFromCache(modelCacheHandle, mModelCacheData);
readFromCache(dataCacheHandle, mDataCacheData);
@@ -236,7 +243,8 @@ class CachingDriver : public sample_driver::SampleDriver {
}
}
- void writeToCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& cache) {
+ void writeToCache(const hardware::hidl_vec<hardware::hidl_handle>& handles,
+ const std::vector<uint8_t>& cache) {
for (uint32_t i = 0; i < handles.size(); ++i) {
ASSERT_EQ(handles[i]->numFds, 1);
EXPECT_EQ(write(handles[i]->data[0], cache.data(), kCacheSize),
@@ -244,7 +252,8 @@ class CachingDriver : public sample_driver::SampleDriver {
}
}
- void readFromCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& expected) {
+ void readFromCache(const hardware::hidl_vec<hardware::hidl_handle>& handles,
+ const std::vector<uint8_t>& expected) {
for (uint32_t i = 0; i < handles.size(); ++i) {
ASSERT_EQ(handles[i]->numFds, 1);
std::vector<uint8_t> actual(kCacheSize);
@@ -257,10 +266,10 @@ class CachingDriver : public sample_driver::SampleDriver {
std::vector<uint8_t> mModelCacheData;
std::vector<uint8_t> mDataCacheData;
- const ErrorStatus mErrorStatusGetNumCacheFiles;
+ const V1_3::ErrorStatus mErrorStatusGetNumCacheFiles;
const uint32_t mNumModelCache;
const uint32_t mNumDataCache;
- const ErrorStatus mErrorStatusPrepareFromCache;
+ const V1_3::ErrorStatus mErrorStatusPrepareFromCache;
bool mHasCalledPrepareModelFromCache = false;
HasCalledPrepareModel mHasCalledPrepareModel = HasCalledPrepareModel::NO;
@@ -279,7 +288,7 @@ void CreateBroadcastAddModel(test_wrapper::Model* model) {
model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c});
model->identifyInputsAndOutputs({a, b}, {c});
ASSERT_TRUE(model->isValid());
- ASSERT_EQ(model->finish(), Result::NO_ERROR);
+ ASSERT_EQ(model->finish(), WrapperResult::NO_ERROR);
}
void getDeviceWithName(std::string_view deviceName, const ANeuralNetworksDevice** outputDevice) {
@@ -307,17 +316,17 @@ void getDeviceWithName(std::string_view deviceName, const ANeuralNetworksDevice*
// - ErrorStatus returning from getNumberOfCacheFilesNeeded
// - Number of model cache files returning from getNumberOfCacheFilesNeeded
// - Number of data cache files returning from getNumberOfCacheFilesNeeded
-using DeviceRegistrationTestParam = std::tuple<ErrorStatus, uint32_t, uint32_t>;
+using DeviceRegistrationTestParam = std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t>;
class DeviceRegistrationTest : public ::testing::TestWithParam<DeviceRegistrationTestParam> {
protected:
static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching";
- const ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam());
+ const V1_3::ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam());
const uint32_t kNumModelCache = std::get<1>(GetParam());
const uint32_t kNumDataCache = std::get<2>(GetParam());
const sp<CachingDriver> kDriver =
new CachingDriver(kDeviceName, kErrorStatusGetNumCacheFiles, kNumModelCache,
- kNumDataCache, ErrorStatus::NONE);
+ kNumDataCache, V1_3::ErrorStatus::NONE);
};
TEST_P(DeviceRegistrationTest, CachingFailure) {
@@ -344,7 +353,7 @@ TEST_P(DeviceRegistrationTest, CachingFailure) {
// - Number of model cache files returning from getNumberOfCacheFilesNeeded
// - Number of data cache files returning from getNumberOfCacheFilesNeeded
// - ErrorStatus returning from prepareModelFromCache_1_3
-using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, ErrorStatus>;
+using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, V1_3::ErrorStatus>;
class CompilationCachingTest : public ::testing::TestWithParam<CompilationCachingTestParam> {
protected:
@@ -390,27 +399,29 @@ class CompilationCachingTest : public ::testing::TestWithParam<CompilationCachin
}
void createCache() {
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, ErrorStatus::NONE);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache,
+ kNumDataCache, V1_3::ErrorStatus::NONE);
compileModel(driver, /*withToken=*/true);
}
static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching";
const uint32_t kNumModelCache = std::get<0>(GetParam());
const uint32_t kNumDataCache = std::get<1>(GetParam());
- const ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam());
+ const V1_3::ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam());
const bool kIsCachingSupported = isCachingSupported(kNumModelCache, kNumDataCache);
test_wrapper::Model mModel;
std::string mCacheDir;
- const CacheToken kToken{};
+ const HalCacheToken kToken{};
};
TEST_P(CompilationCachingTest, TokenProvidedAndCacheNotExist) {
if (DeviceManager::get()->getUseCpuOnly()) {
return;
}
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/true);
// When cache file does not exist, the runtime should never call prepareModelFromCache_1_3.
@@ -427,8 +438,9 @@ TEST_P(CompilationCachingTest, TokenProvidedAndCacheExist) {
return;
}
createCache();
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/true);
// When cache files exist, the runtime should call prepareModelFromCache_1_3 iff caching
@@ -437,7 +449,7 @@ TEST_P(CompilationCachingTest, TokenProvidedAndCacheExist) {
HasCalledPrepareModel expectHasCalledPrepareModel;
if (kIsCachingSupported) {
- if (kErrorStatusPrepareFromCache == ErrorStatus::NONE) {
+ if (kErrorStatusPrepareFromCache == V1_3::ErrorStatus::NONE) {
// The runtime should not call prepareModel_1_3 iff caching supported and
// prepareModelFromCache_1_3 succeeds.
expectHasCalledPrepareModel = HasCalledPrepareModel::NO;
@@ -457,8 +469,9 @@ TEST_P(CompilationCachingTest, TokenNotProvided) {
if (DeviceManager::get()->getUseCpuOnly()) {
return;
}
- sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache,
- kNumDataCache, kErrorStatusPrepareFromCache);
+ sp<CachingDriver> driver =
+ new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache,
+ kErrorStatusPrepareFromCache);
compileModel(driver, /*withToken=*/false);
// When no NDK token is provided by the client, the runtime should never call
@@ -468,15 +481,15 @@ TEST_P(CompilationCachingTest, TokenNotProvided) {
}
static const auto kErrorStatusGetNumCacheFilesChoices =
- testing::Values(ErrorStatus::NONE, ErrorStatus::DEVICE_UNAVAILABLE);
+ testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::DEVICE_UNAVAILABLE);
static const auto kNumCacheChoices =
- testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES),
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES) + 1);
+ testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES),
+ static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES) + 1);
static const auto kNumValidCacheChoices =
- testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES));
static const auto kErrorStatusPrepareFromCacheChoices =
- testing::Values(ErrorStatus::NONE, ErrorStatus::GENERAL_FAILURE,
- ErrorStatus::DEVICE_UNAVAILABLE, ErrorStatus::INVALID_ARGUMENT);
+ testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE,
+ V1_3::ErrorStatus::DEVICE_UNAVAILABLE, V1_3::ErrorStatus::INVALID_ARGUMENT);
INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, DeviceRegistrationTest,
testing::Combine(kErrorStatusGetNumCacheFilesChoices, kNumCacheChoices,
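
The parameter plumbing above is the stock googletest pattern: testing::Combine crosses the choice lists into tuples, and the fixture slices its tuple with std::get. A self-contained sketch of that mechanism (stand-in enum; the value 32 stands in for V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES, which this sketch assumes):

    #include <cstdint>
    #include <tuple>
    #include <gtest/gtest.h>

    enum class ErrorStatus { NONE, DEVICE_UNAVAILABLE };  // stand-in for V1_3::ErrorStatus

    using RegistrationParam = std::tuple<ErrorStatus, uint32_t, uint32_t>;

    class RegistrationSketch : public testing::TestWithParam<RegistrationParam> {
       protected:
        const ErrorStatus kError = std::get<0>(GetParam());
        const uint32_t kNumModelCache = std::get<1>(GetParam());
        const uint32_t kNumDataCache = std::get<2>(GetParam());
    };

    TEST_P(RegistrationSketch, CanRegister) {
        // Mirrors canDeviceBeRegistered: status NONE plus cache counts within bounds.
        const bool expectRegistered =
                kError == ErrorStatus::NONE && kNumModelCache <= 32 && kNumDataCache <= 32;
        (void)expectRegistered;  // the real test compares against DeviceManager's device list
    }

    INSTANTIATE_TEST_SUITE_P(
            Sketch, RegistrationSketch,
            testing::Combine(testing::Values(ErrorStatus::NONE, ErrorStatus::DEVICE_UNAVAILABLE),
                             testing::Values(0u, 1u, 32u, 33u), testing::Values(0u, 1u, 32u, 33u)));
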
diff --git a/nn/runtime/test/TestCompliance.cpp b/nn/runtime/test/TestCompliance.cpp
index d756c2414..299eebcf7 100644
--- a/nn/runtime/test/TestCompliance.cpp
+++ b/nn/runtime/test/TestCompliance.cpp
@@ -27,7 +27,6 @@
namespace android::nn::compliance_test {
-using namespace hal;
using namespace test_helper;
using HidlModel = V1_3::Model;
using WrapperModel = test_wrapper::Model;
@@ -42,7 +41,7 @@ static HidlModel createHidlModel(const WrapperModel& wrapperModel) {
auto modelBuilder = reinterpret_cast<const ModelBuilder*>(wrapperModel.getHandle());
EXPECT_TRUE(modelBuilder->isFinished());
EXPECT_TRUE(modelBuilder->isValid());
- return modelBuilder->makeHidlModel();
+ return convertToV1_3(modelBuilder->makeModel());
}
static void testAvailableSinceV1_3(const WrapperModel& wrapperModel) {
@@ -73,12 +72,12 @@ static void testAvailableSinceV1_0(const WrapperModel& wrapperModel) {
ASSERT_TRUE(compliantWithV1_0(hidlModel));
}
-static void testAvailableSinceV1_2(const Request& request) {
+static void testAvailableSinceV1_2(const V1_3::Request& request) {
ASSERT_FALSE(compliantWithV1_0(request));
ASSERT_TRUE(compliantWithV1_2(request));
}
-static void testAvailableSinceV1_3(const Request& request) {
+static void testAvailableSinceV1_3(const V1_3::Request& request) {
ASSERT_FALSE(compliantWithV1_0(request));
ASSERT_FALSE(compliantWithV1_2(request));
}
@@ -172,20 +171,20 @@ TEST_F(ComplianceTest, HardwareBufferModel) {
TEST_F(ComplianceTest, HardwareBufferRequest) {
const auto [n, ahwb] = MemoryRuntimeAHWB::create(1024);
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
+ V1_3::Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool();
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
ASSERT_TRUE(ahwbMemoryPool.hidlMemory().valid());
// AHardwareBuffer as input.
- testAvailableSinceV1_2(Request{
+ testAvailableSinceV1_2(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {ahwbMemoryPool, sharedMemoryPool},
});
// AHardwareBuffer as output.
- testAvailableSinceV1_2(Request{
+ testAvailableSinceV1_2(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {sharedMemoryPool, ahwbMemoryPool},
@@ -194,20 +193,20 @@ TEST_F(ComplianceTest, HardwareBufferRequest) {
#endif
TEST_F(ComplianceTest, DeviceMemory) {
- Request::MemoryPool sharedMemoryPool, deviceMemoryPool;
+ V1_3::Request::MemoryPool sharedMemoryPool, deviceMemoryPool;
sharedMemoryPool.hidlMemory(allocateSharedMemory(1024));
ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid());
deviceMemoryPool.token(1);
// Device memory as input.
- testAvailableSinceV1_3(Request{
+ testAvailableSinceV1_3(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {deviceMemoryPool, sharedMemoryPool},
});
// Device memory as output.
- testAvailableSinceV1_3(Request{
+ testAvailableSinceV1_3(V1_3::Request{
.inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}},
.outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}},
.pools = {sharedMemoryPool, deviceMemoryPool},
diff --git a/nn/runtime/test/TestExecution.cpp b/nn/runtime/test/TestExecution.cpp
index 3441f9fc4..5f012c3eb 100644
--- a/nn/runtime/test/TestExecution.cpp
+++ b/nn/runtime/test/TestExecution.cpp
@@ -38,49 +38,54 @@
namespace android {
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
using HidlModel = V1_3::Model;
using PreparedModelCallback = nn::PreparedModelCallback;
-using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
using WrapperCompilation = nn::test_wrapper::Compilation;
using WrapperEvent = nn::test_wrapper::Event;
using WrapperExecution = nn::test_wrapper::Execution;
using WrapperModel = nn::test_wrapper::Model;
using WrapperOperandType = nn::test_wrapper::OperandType;
+using WrapperResult = nn::test_wrapper::Result;
using WrapperType = nn::test_wrapper::Type;
using nn::convertToV1_0;
+using nn::convertToV1_3;
+using nn::ErrorStatus;
template <typename T>
using MQDescriptorSync = hardware::MQDescriptorSync<T>;
namespace {
-const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
// Wraps the latest version of IPreparedModel to allow dummying up the execution status,
// and control when the execution finishes.
-class TestPreparedModelLatest : public IPreparedModel {
+class TestPreparedModelLatest : public V1_3::IPreparedModel {
public:
// If errorStatus is NONE, then execute behaves normally (and sends back
// the actual execution status). Otherwise, don't bother to execute, and
// just send back errorStatus (as the execution status, not the launch
// status).
- TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mPreparedModelV1_0(preparedModel),
mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),
mErrorStatus(errorStatus) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_0 != nullptr) << "V1_0 prepared model is nullptr.";
std::thread([this, request, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_0->execute(request, callback);
} else {
@@ -90,16 +95,17 @@ class TestPreparedModelLatest : public IPreparedModel {
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
std::thread([this, request, measure, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_2->execute_1_2(request, measure, callback);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
callback->notify_1_2(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
} else {
callback->notify_1_2(convertToV1_0(mErrorStatus), {}, kBadTiming);
@@ -108,19 +114,20 @@ class TestPreparedModelLatest : public IPreparedModel {
return V1_0::ErrorStatus::NONE;
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<V1_3::IExecutionCallback>& callback) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
std::thread([this, request, measure, deadline, loopTimeoutDuration, callback] {
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
// Note that we lose the actual launch status.
(void)mPreparedModelV1_3->execute_1_3(request, measure, deadline,
loopTimeoutDuration, callback);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
callback->notify_1_3(mErrorStatus, {shape}, kBadTiming);
} else {
callback->notify_1_3(mErrorStatus, {}, kBadTiming);
@@ -129,53 +136,55 @@ class TestPreparedModelLatest : public IPreparedModel {
return V1_3::ErrorStatus::NONE;
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_2->executeSynchronously(request, measure, cb);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
cb(convertToV1_0(mErrorStatus), {shape}, kBadTiming);
- return Void();
+ return hardware::Void();
} else {
cb(convertToV1_0(mErrorStatus), {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> executeSynchronously_1_3(const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ executeSynchronously_1_3_cb cb) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_3->executeSynchronously_1_3(request, measure, deadline,
loopTimeoutDuration, cb);
- } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- OutputShape shape = {.dimensions = {1}, .isSufficient = false};
+ } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false};
cb(mErrorStatus, {shape}, kBadTiming);
- return Void();
+ return hardware::Void();
} else {
cb(mErrorStatus, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
configureExecutionBurst_cb cb) override {
CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr.";
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_2->configureExecutionBurst(callback, requestChannel,
resultChannel, cb);
} else {
cb(convertToV1_0(mErrorStatus), nullptr);
- return Void();
+ return hardware::Void();
}
}
@@ -184,25 +193,27 @@ class TestPreparedModelLatest : public IPreparedModel {
// SampleDriver is written with that in mind. Therefore, this
// implementation is synchronous also. If the SampleDriver is updated to
// return real sync fence, this must be updated.
- Return<void> executeFenced(const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration,
- executeFenced_cb cb) override {
+ hardware::Return<void> executeFenced(const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration,
+ executeFenced_cb cb) override {
CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr.";
- CHECK(mErrorStatus != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE)
+ CHECK(mErrorStatus != V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE)
<< "executeFenced does not support dynamic output shape";
dummyExecution();
- if (mErrorStatus == ErrorStatus::NONE) {
+ if (mErrorStatus == V1_3::ErrorStatus::NONE) {
return mPreparedModelV1_3->executeFenced(request, waitFor, measure, deadline,
loopTimeoutDuration, duration, cb);
} else {
// Due to the limitations of the SampleDriver, all failures look
// like launch failures. If the SampleDriver is updated to return
// real sync fences, this must be updated.
- cb(mErrorStatus, hidl_handle(nullptr), nullptr);
+ cb(mErrorStatus, hardware::hidl_handle(nullptr), nullptr);
}
- return Void();
+ return hardware::Void();
}
// We can place the TestPreparedModelLatest system in a "pause" mode where
@@ -225,7 +236,7 @@ class TestPreparedModelLatest : public IPreparedModel {
const sp<V1_0::IPreparedModel> mPreparedModelV1_0;
const sp<V1_2::IPreparedModel> mPreparedModelV1_2;
const sp<V1_3::IPreparedModel> mPreparedModelV1_3;
- ErrorStatus mErrorStatus;
+ V1_3::ErrorStatus mErrorStatus;
static std::atomic<bool> mPauseExecutions;
static std::atomic<unsigned int> mExecutionsInFlight;
@@ -245,25 +256,27 @@ using TestPreparedModel13 = TestPreparedModelLatest;
// Like TestPreparedModelLatest, but implementing 1.2
class TestPreparedModel12 : public V1_2::IPreparedModel {
public:
- TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute_1_2(request, measure, callback);
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
return mLatestPreparedModel->executeSynchronously(request, measure, cb);
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -273,22 +286,22 @@ class TestPreparedModel12 : public V1_2::IPreparedModel {
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Like TestPreparedModelLatest, but implementing 1.0
class TestPreparedModel10 : public V1_0::IPreparedModel {
public:
- TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)
+ TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus)
: mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Behaves like SampleDriver, except that it produces wrapped IPreparedModel.
@@ -300,13 +313,13 @@ class TestDriver13 : public SampleDriver {
// status). Otherwise, don't bother to execute, and just send
// back errorStatus (as the execution status, not the launch
// status).
- TestDriver13(const std::string& name, ErrorStatus errorStatus)
+ TestDriver13(const std::string& name, V1_3::ErrorStatus errorStatus)
: SampleDriver(name.c_str()), mErrorStatus(errorStatus) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance =
@@ -314,41 +327,43 @@ class TestDriver13 : public SampleDriver {
.ifPerformance = kPerf,
.whilePerformance = kPerf};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
+ hardware::Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
model, preference, priority, deadline, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
}
- if (prepareModelReturn != ErrorStatus::NONE) {
+ if (prepareModelReturn != V1_3::ErrorStatus::NONE) {
actualCallback->notify_1_3(
- localCallback->getStatus(),
+ convertToV1_3(localCallback->getStatus()),
V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
return prepareModelReturn;
}
localCallback->wait();
if (localCallback->getStatus() != ErrorStatus::NONE) {
actualCallback->notify_1_3(
- localCallback->getStatus(),
+ convertToV1_3(localCallback->getStatus()),
V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel()));
} else {
actualCallback->notify_1_3(
@@ -358,13 +373,14 @@ class TestDriver13 : public SampleDriver {
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
+ hardware::Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
model, preference, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
@@ -388,11 +404,11 @@ class TestDriver13 : public SampleDriver {
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
- Return<V1_0::ErrorStatus> prepareModelReturn =
+ hardware::Return<V1_0::ErrorStatus> prepareModelReturn =
SampleDriver::prepareModel_1_1(model, preference, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
return prepareModelReturn;
@@ -414,75 +430,79 @@ class TestDriver13 : public SampleDriver {
return prepareModelReturn;
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
- actualCallback);
+ return prepareModel_1_1(nn::convertToV1_1(model),
+ V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, actualCallback);
}
private:
- ErrorStatus mErrorStatus;
+ V1_3::ErrorStatus mErrorStatus;
};
// Like TestDriver, but implementing 1.2
class TestDriver12 : public V1_2::IDevice {
public:
- TestDriver12(const std::string& name, ErrorStatus errorStatus)
+ TestDriver12(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const nn::HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
@@ -493,29 +513,29 @@ class TestDriver12 : public V1_2::IDevice {
// Like TestDriver, but implementing 1.1
class TestDriver11 : public V1_1::IDevice {
public:
- TestDriver11(const std::string& name, ErrorStatus errorStatus)
+ TestDriver11(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -528,21 +548,21 @@ class TestDriver11 : public V1_1::IDevice {
// Like TestDriver, but implementing 1.0
class TestDriver10 : public V1_0::IDevice {
public:
- TestDriver10(const std::string& name, ErrorStatus errorStatus)
+ TestDriver10(const std::string& name, V1_3::ErrorStatus errorStatus)
: mLatestDriver(new TestDriver13(name, errorStatus)) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
@@ -560,7 +580,7 @@ class TestCompilation : public WrapperCompilation {
// Otherwise, don't bother to execute, and just send back
// errorStatus (as the execution status, not the launch status).
TestCompilation(const WrapperModel* model, const std::string& deviceName,
- ErrorStatus errorStatus) {
+ V1_3::ErrorStatus errorStatus) {
std::vector<std::shared_ptr<Device>> devices;
auto device = DeviceManager::forTest_makeDriverDevice(
deviceName, new DriverClass(deviceName, errorStatus));
@@ -613,7 +633,7 @@ class TestIntrospectionCompilation : public WrapperCompilation {
template <class DriverClass>
class ExecutionTestTemplate
- : public ::testing::TestWithParam<std::tuple<ErrorStatus, Result, bool>> {
+ : public ::testing::TestWithParam<std::tuple<V1_3::ErrorStatus, WrapperResult, bool>> {
public:
ExecutionTestTemplate()
: kName(toString(std::get<0>(GetParam()))),
@@ -648,11 +668,11 @@ class ExecutionTestTemplate
// sends back the actual execution status). Otherwise, don't
// bother to execute, and just send back kForceErrorStatus (as the
// execution status, not the launch status).
- const ErrorStatus kForceErrorStatus;
+ const V1_3::ErrorStatus kForceErrorStatus;
- // What result do we expect from the execution? (The Result
+ // What result do we expect from the execution? (The WrapperResult
// equivalent of kForceErrorStatus.)
- const Result kExpectResult;
+ const WrapperResult kExpectResult;
// Whether mCompilation is created via Introspection API or not.
const bool kUseIntrospectionAPI;
@@ -663,8 +683,10 @@ class ExecutionTestTemplate
void setInputOutput(WrapperExecution* execution) {
mInputBuffer = kInputBuffer;
mOutputBuffer = kOutputBufferInitial;
- ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)), Result::NO_ERROR);
- ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)), Result::NO_ERROR);
+ ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)),
+ WrapperResult::NO_ERROR);
+ ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)),
+ WrapperResult::NO_ERROR);
}
const float kInputBuffer = 3.14;
@@ -683,7 +705,7 @@ class ExecutionTestTemplate
uint32_t output = model.addOperand(&tensorType);
model.addOperation(ANEURALNETWORKS_FLOOR, {input}, {output});
model.identifyInputsAndOutputs({input}, {output});
- assert(model.finish() == Result::NO_ERROR);
+ assert(model.finish() == WrapperResult::NO_ERROR);
return model;
}
@@ -697,13 +719,13 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
GTEST_SKIP();
}
- ASSERT_EQ(mCompilation.finish(), Result::NO_ERROR);
+ ASSERT_EQ(mCompilation.finish(), WrapperResult::NO_ERROR);
const auto getDimensionsWhileRunning = [](WrapperExecution& execution) {
TestPreparedModelLatest::waitForExecutionToBegin();
// Cannot query dimensions while execution is running
std::vector<uint32_t> dimensions;
- EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), WrapperResult::BAD_STATE);
};
{
@@ -712,21 +734,22 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution));
TestPreparedModelLatest::pauseExecutions(true);
WrapperEvent event;
- ASSERT_EQ(execution.startCompute(&event), Result::NO_ERROR);
+ ASSERT_EQ(execution.startCompute(&event), WrapperResult::NO_ERROR);
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
ASSERT_EQ(event.wait(), kExpectResult);
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
{
@@ -738,17 +761,18 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
{
@@ -767,20 +791,21 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR ||
- kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult == WrapperResult::NO_ERROR ||
+ kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
- if (kExpectResult != Result::OUTPUT_INSUFFICIENT_SIZE) {
+ if (kExpectResult != WrapperResult::OUTPUT_INSUFFICIENT_SIZE) {
// computeWithDependencies doesn't support OUTPUT_INSUFFICIENT_SIZE
SCOPED_TRACE("computeWithDependencies");
WrapperExecution execution(&mCompilation);
@@ -796,32 +821,35 @@ void ExecutionTestTemplate<DriverClass>::TestWait() {
getDimensionsWhileRunning(execution);
TestPreparedModelLatest::pauseExecutions(false);
run.join();
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
ASSERT_EQ(event.wait(), kExpectResult);
ASSERT_EQ(mOutputBuffer, kOutputBufferExpected);
} else {
- ASSERT_EQ(event.wait(), Result::UNEXPECTED_NULL);
+ ASSERT_EQ(event.wait(), WrapperResult::UNEXPECTED_NULL);
}
std::vector<uint32_t> dimensions;
- if (kExpectResult == Result::NO_ERROR) {
+ if (kExpectResult == WrapperResult::NO_ERROR) {
// Only one output operand, hardcoded as index 0.
ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult);
ASSERT_EQ(dimensions, kOutputDimensionsExpected);
} else {
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE);
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions),
+ WrapperResult::BAD_STATE);
}
}
}
auto kTestValues = ::testing::Values(
- std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE,
+ std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR,
+ /* kUseIntrospectionAPI */ false),
+ std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED,
+ std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE,
+ std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+ WrapperResult::OUTPUT_INSUFFICIENT_SIZE,
/* kUseIntrospectionAPI */ false),
- std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
+ std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA,
/* kUseIntrospectionAPI */ false));
class ExecutionTest13 : public ExecutionTestTemplate<TestDriver13> {};
@@ -838,27 +866,29 @@ INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest12, kTestValues);
class ExecutionTest11 : public ExecutionTestTemplate<TestDriver11> {};
TEST_P(ExecutionTest11, Wait) {
- if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
+ if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
TestWait();
}
INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest11, kTestValues);
class ExecutionTest10 : public ExecutionTestTemplate<TestDriver10> {};
TEST_P(ExecutionTest10, Wait) {
- if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
+ if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return;
TestWait();
}
INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest10, kTestValues);
auto kIntrospectionTestValues = ::testing::Values(
- std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE,
+ std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR,
+ /* kUseIntrospectionAPI */ true),
+ std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED,
+ std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE,
+ std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+ WrapperResult::OUTPUT_INSUFFICIENT_SIZE,
/* kUseIntrospectionAPI */ true),
- std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
+ std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA,
/* kUseIntrospectionAPI */ true));
INSTANTIATE_TEST_SUITE_P(IntrospectionFlavor, ExecutionTest13, kIntrospectionTestValues);
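
The kTestValues/kIntrospectionTestValues tuples above encode a fixed correspondence between the driver-side V1_3::ErrorStatus and the runtime-visible wrapper Result. A minimal standalone sketch of that mapping, using local stand-in enums rather than the real HIDL and NeuralNetworksWrapper types:

#include <cassert>

// Local stand-ins for V1_3::ErrorStatus and test_wrapper::Result.
enum class ErrorStatus { NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE,
                         OUTPUT_INSUFFICIENT_SIZE, INVALID_ARGUMENT };
enum class Result { NO_ERROR, UNAVAILABLE_DEVICE, OP_FAILED,
                    OUTPUT_INSUFFICIENT_SIZE, BAD_DATA };

// The correspondence exercised by the test tuples above.
Result toResult(ErrorStatus status) {
    switch (status) {
        case ErrorStatus::NONE: return Result::NO_ERROR;
        case ErrorStatus::DEVICE_UNAVAILABLE: return Result::UNAVAILABLE_DEVICE;
        case ErrorStatus::GENERAL_FAILURE: return Result::OP_FAILED;
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: return Result::OUTPUT_INSUFFICIENT_SIZE;
        case ErrorStatus::INVALID_ARGUMENT: return Result::BAD_DATA;
    }
    return Result::OP_FAILED;  // unreachable for the values above
}

int main() {
    assert(toResult(ErrorStatus::NONE) == Result::NO_ERROR);
    assert(toResult(ErrorStatus::INVALID_ARGUMENT) == Result::BAD_DATA);
    return 0;
}
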
diff --git a/nn/runtime/test/TestExtensions.cpp b/nn/runtime/test/TestExtensions.cpp
index f104854b9..da13073e2 100644
--- a/nn/runtime/test/TestExtensions.cpp
+++ b/nn/runtime/test/TestExtensions.cpp
@@ -32,7 +32,9 @@ using DeviceManager = ::android::nn::DeviceManager;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
using TypeManager = ::android::nn::TypeManager;
-using namespace android::nn::hal;
+namespace hardware = ::android::hardware;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
const char* kTestDriverName = "extensions-test-driver";
const char* kTestExtension1 = "vendor.test.one";
@@ -44,23 +46,24 @@ class TestDriver : public SampleDriver {
TestDriver() : SampleDriver(kTestDriverName) {}
~TestDriver() override {}
- Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override {
cb(V1_0::ErrorStatus::NONE, {
{.name = kTestExtension1},
{.name = kTestExtension2},
{.name = kTestExtension3},
});
- return Void();
+ return hardware::Void();
}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_1_3_cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model&,
+ getSupportedOperations_1_3_cb) override {
CHECK(false) << "not implemented";
- return Void();
+ return hardware::Void();
}
};
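
The TestExtensions change above swaps a blanket using-directive on android::nn::hal for per-version namespace aliases. A self-contained sketch of that pattern, with placeholder namespaces standing in for the HIDL-generated ones:

// Placeholder namespaces standing in for the HIDL-generated ones.
namespace android::hardware::neuralnetworks::V1_0 {
enum class ErrorStatus { NONE };
}
namespace android::hardware::neuralnetworks::V1_3 {
enum class ErrorStatus { NONE, GENERAL_FAILURE };
}

// Each HAL version gets a short alias, so every use site names the
// version it depends on instead of pulling in one flat namespace.
namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;

int main() {
    V1_0::ErrorStatus launchStatus = V1_0::ErrorStatus::NONE;
    V1_3::ErrorStatus executionStatus = V1_3::ErrorStatus::GENERAL_FAILURE;
    (void)launchStatus;
    (void)executionStatus;
    return 0;
}
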
diff --git a/nn/runtime/test/TestFailingDriver.cpp b/nn/runtime/test/TestFailingDriver.cpp
index 7d41ace20..d2e30a656 100644
--- a/nn/runtime/test/TestFailingDriver.cpp
+++ b/nn/runtime/test/TestFailingDriver.cpp
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include <algorithm>
#include <memory>
#include <vector>
@@ -28,7 +29,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using sample_driver::SampleDriverPartial;
using Result = test_wrapper::Result;
using WrapperOperandType = test_wrapper::OperandType;
@@ -50,20 +50,21 @@ class FailingTestDriver : public SampleDriverPartial {
// EmptyOperationResolver causes execution to fail.
FailingTestDriver() : SampleDriverPartial(kTestDriverName, &mEmptyOperationResolver) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE,
- {.operandPerformance = {{.type = OperandType::TENSOR_FLOAT32,
+ {.operandPerformance = {{.type = V1_3::OperandType::TENSOR_FLOAT32,
.info = {.execTime = 0.1, // Faster than CPU.
.powerUsage = 0.1}}}});
- return Void();
+ return hardware::Void();
}
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override {
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override {
std::vector<bool> supported(model.main.operations.size());
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [](const Operation& operation) { return operation.type == OperationType::SQRT; });
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [](const V1_3::Operation& operation) {
+ return operation.type == V1_3::OperationType::SQRT;
+ });
return supported;
}
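
FailingTestDriver's getSupportedOperationsImpl keeps the std::transform-over-operations idiom while spelling out the V1_3 types. A standalone sketch of that idiom, with local stand-ins for Operation and OperationType:

#include <algorithm>
#include <cassert>
#include <vector>

enum class OperationType { SQRT, ADD };    // local stand-in
struct Operation { OperationType type; };  // local stand-in

// Mark exactly the SQRT operations as supported, as FailingTestDriver does.
std::vector<bool> supportedOps(const std::vector<Operation>& ops) {
    std::vector<bool> supported(ops.size());
    std::transform(ops.begin(), ops.end(), supported.begin(),
                   [](const Operation& op) { return op.type == OperationType::SQRT; });
    return supported;
}

int main() {
    const std::vector<Operation> ops = {{OperationType::SQRT}, {OperationType::ADD}};
    const auto supported = supportedOps(ops);
    assert(supported[0] && !supported[1]);
    return 0;
}
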
diff --git a/nn/runtime/test/TestIntrospectionControl.cpp b/nn/runtime/test/TestIntrospectionControl.cpp
index 972619ef5..abb7e3306 100644
--- a/nn/runtime/test/TestIntrospectionControl.cpp
+++ b/nn/runtime/test/TestIntrospectionControl.cpp
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include <algorithm>
#include <chrono>
#include <iterator>
#include <map>
@@ -41,7 +42,10 @@
namespace {
using namespace ::android;
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
@@ -63,40 +67,42 @@ using nn::convertToV1_3;
template <typename T>
using MQDescriptorSync = hardware::MQDescriptorSync<T>;
-constexpr Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
-constexpr Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456};
-constexpr Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56};
+constexpr V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+constexpr V1_2::Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456};
+constexpr V1_2::Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56};
// This is an IDevice for testing purposes. The test driver has customized
// getCapabilities_1_3 and getSupportedOperations_1_3.
class TestDriver : public SampleDriver {
public:
- TestDriver(const char* name, Capabilities capabilities, const std::vector<bool>& supportedOps)
+ TestDriver(const char* name, V1_3::Capabilities capabilities,
+ const std::vector<bool>& supportedOps)
: SampleDriver(name), mCapabilities(capabilities), mSupportedOps(supportedOps) {}
~TestDriver() override {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, mCapabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
- return Void();
+ return hardware::Void();
}
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [this](Operation op) { return mSupportedOps[static_cast<int32_t>(op.type)]; });
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [this](V1_3::Operation op) {
+ return mSupportedOps[static_cast<int32_t>(op.type)];
+ });
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
private:
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
std::vector<bool> mSupportedOps;
};
@@ -119,7 +125,7 @@ class IntrospectionControlTest : public ::testing::Test {
struct DeviceSpecification {
DeviceSpecification(const std::string& name, float perf, std::vector<bool>& supportedOps)
: mName(name), mSupportedOps(supportedOps) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
mCapabilities = {
.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
@@ -129,7 +135,7 @@ class IntrospectionControlTest : public ::testing::Test {
.whilePerformance = perfInfo};
}
std::string mName;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
std::vector<bool> mSupportedOps;
};
@@ -383,14 +389,14 @@ std::ostream& operator<<(std::ostream& os, Success success) {
// Returns (unfenced timing, fenced timing).
// Not for PASS_CPU.
-std::pair<Timing, Timing> getExpectedTiming(Success s, bool fencedExecution) {
+std::pair<V1_2::Timing, V1_2::Timing> getExpectedTiming(Success s, bool fencedExecution) {
CHECK_NE(s, Success::PASS_CPU);
if (!hasBit(s, Success::PASS_BIT)) {
return {kBadTiming, kBadTiming};
}
- std::pair<Timing, Timing> result;
+ std::pair<V1_2::Timing, V1_2::Timing> result;
result.first.timeOnDevice = hasBit(s, Success::PASS_UNFENCED_DEVICE_BIT)
? kGoodUnfencedTiming.timeOnDevice
: UINT64_MAX;
@@ -416,12 +422,12 @@ std::pair<Timing, Timing> getExpectedTiming(Success s, bool fencedExecution) {
class TestPreparedModelLatest : public SamplePreparedModel {
public:
TestPreparedModelLatest(const HidlModel& model, const SampleDriver* driver, Success success)
- : SamplePreparedModel(model, driver, ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{},
- kDefaultPriority),
+ : SamplePreparedModel(model, driver, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{},
+ nn::kDefaultPriority13),
mSuccess(success) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request&, const sp<V1_0::IExecutionCallback>& callback) override {
switch (mSuccess) {
case Success::PASS_NEITHER:
std::thread([callback] {
@@ -445,9 +451,10 @@ class TestPreparedModelLatest : public SamplePreparedModel {
}
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request&, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
switch (mSuccess) {
case Success::PASS_NEITHER:
case Success::PASS_DEVICE:
@@ -475,17 +482,18 @@ class TestPreparedModelLatest : public SamplePreparedModel {
}
}
- Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const sp<V1_3::IExecutionCallback>& callback) override {
+ hardware::Return<V1_3::ErrorStatus> execute_1_3(
+ const V1_3::Request&, V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const sp<V1_3::IExecutionCallback>& callback) override {
// Use a placeholder V1_0::Request because execute_1_2 ignores request entirely.
const V1_0::ErrorStatus status = execute_1_2(V1_0::Request{}, measure, callback);
return convertToV1_3(status);
}
- Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming measure,
- executeSynchronously_cb cb) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
switch (mSuccess) {
case Success::PASS_NEITHER:
case Success::PASS_DEVICE:
@@ -493,7 +501,7 @@ class TestPreparedModelLatest : public SamplePreparedModel {
case Success::PASS_BOTH:
dummyExecution();
cb(V1_0::ErrorStatus::NONE, {}, getExpectedTiming(mSuccess, false).first);
- return Void();
+ return hardware::Void();
case Success::FAIL_WAIT:
// While this is a synchronous execution method, the NNAPI
// runtime may call it even for asynchronous execution, so we
@@ -503,19 +511,22 @@ class TestPreparedModelLatest : public SamplePreparedModel {
case Success::FAIL_LAUNCH:
dummyExecution();
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
default:
ADD_FAILURE() << "Unexpected Success kind";
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming);
- return Void();
+ return hardware::Void();
}
}
- Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- executeSynchronously_1_3_cb cb) override {
+ hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeSynchronously_1_3_cb cb) override {
const auto wrappedCb = [&cb](V1_0::ErrorStatus status,
- const hidl_vec<OutputShape>& outputShapes, Timing timing) {
+ const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+ V1_2::Timing timing) {
cb(convertToV1_3(status), outputShapes, timing);
};
// Use a placeholder V1_0::Request because executeSynchronously ignores request entirely.
@@ -525,7 +536,7 @@ class TestPreparedModelLatest : public SamplePreparedModel {
// ExecutionBurstServer::create has an overload that will use
// IPreparedModel::executeSynchronously(), so we can rely on that, rather
// than having to implement ExecutionBurstServer::IExecutorWithCache.
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -534,21 +545,26 @@ class TestPreparedModelLatest : public SamplePreparedModel {
callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
cb(burst == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, burst);
- return Void();
+ return hardware::Void();
}
- Return<void> executeFenced(const Request&, const hidl_vec<hidl_handle>&, MeasureTiming measure,
- const OptionalTimePoint&, const OptionalTimeoutDuration&,
- const OptionalTimeoutDuration&, executeFenced_cb callback) override {
- EXPECT_EQ(measure, MeasureTiming::YES);
+ hardware::Return<void> executeFenced(const V1_3::Request&,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint&,
+ const V1_3::OptionalTimeoutDuration&,
+ const V1_3::OptionalTimeoutDuration&,
+ executeFenced_cb callback) override {
+ EXPECT_EQ(measure, V1_2::MeasureTiming::YES);
if (hasBit(mSuccess, Success::PASS_BIT)) {
dummyExecution();
const auto expectedTiming = getExpectedTiming(mSuccess, true);
sp<SampleFencedExecutionCallback> fencedExecutionCallback =
new SampleFencedExecutionCallback(expectedTiming.first, expectedTiming.second,
V1_3::ErrorStatus::NONE);
- callback(V1_3::ErrorStatus::NONE, hidl_handle(nullptr), fencedExecutionCallback);
- return Void();
+ callback(V1_3::ErrorStatus::NONE, hardware::hidl_handle(nullptr),
+ fencedExecutionCallback);
+ return hardware::Void();
}
switch (mSuccess) {
case Success::FAIL_WAIT:
@@ -559,11 +575,12 @@ class TestPreparedModelLatest : public SamplePreparedModel {
FALLTHROUGH_INTENDED;
case Success::FAIL_LAUNCH:
dummyExecution();
- callback(V1_3::ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr);
- return Void();
+ callback(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr),
+ nullptr);
+ return hardware::Void();
default:
ADD_FAILURE() << "Unexpected Success kind";
- return Void();
+ return hardware::Void();
}
}
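
The execute/executeFenced overrides above key their behavior off bit-flagged Success values via hasBit and getExpectedTiming. A toy version of that scheme, with an invented bit layout rather than the real Success encoding:

#include <cassert>
#include <cstdint>

// Invented bit layout: PASS plus one bit per timing field that is reported.
enum class Success : uint32_t {
    PASS_BIT = 1u << 0,
    PASS_UNFENCED_DEVICE_BIT = 1u << 1,
    FAIL = 0,
    PASS_NEITHER = PASS_BIT,
    PASS_DEVICE = PASS_BIT | PASS_UNFENCED_DEVICE_BIT,
};

bool hasBit(Success s, Success bit) {
    return (static_cast<uint32_t>(s) & static_cast<uint32_t>(bit)) != 0;
}

// Report a device time only when its bit is set, UINT64_MAX otherwise.
uint64_t expectedDeviceTime(Success s, uint64_t goodTime) {
    if (!hasBit(s, Success::PASS_BIT)) return UINT64_MAX;
    return hasBit(s, Success::PASS_UNFENCED_DEVICE_BIT) ? goodTime : UINT64_MAX;
}

int main() {
    assert(expectedDeviceTime(Success::PASS_DEVICE, 123) == 123);
    assert(expectedDeviceTime(Success::PASS_NEITHER, 123) == UINT64_MAX);
    assert(expectedDeviceTime(Success::FAIL, 123) == UINT64_MAX);
    return 0;
}
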
@@ -607,22 +624,24 @@ class TestPreparedModel12 : public V1_2::IPreparedModel {
TestPreparedModel12(const HidlModel& model, const SampleDriver* driver, Success success)
: mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
- Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
- const sp<V1_2::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute_1_2(
+ const V1_0::Request& request, V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute_1_2(request, measure, callback);
}
- Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure,
- executeSynchronously_cb cb) override {
+ hardware::Return<void> executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) override {
return mLatestPreparedModel->executeSynchronously(request, measure, cb);
}
- Return<void> configureExecutionBurst(
+ hardware::Return<void> configureExecutionBurst(
const sp<V1_2::IBurstCallback>& callback,
const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -632,7 +651,7 @@ class TestPreparedModel12 : public V1_2::IPreparedModel {
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Like TestPreparedModelLatest, but implementing 1.0
@@ -641,13 +660,13 @@ class TestPreparedModel10 : public V1_0::IPreparedModel {
TestPreparedModel10(const HidlModel& model, const SampleDriver* driver, Success success)
: mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {}
- Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
- const sp<V1_0::IExecutionCallback>& callback) override {
+ hardware::Return<V1_0::ErrorStatus> execute(
+ const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override {
return mLatestPreparedModel->execute(request, callback);
}
private:
- const sp<IPreparedModel> mLatestPreparedModel;
+ const sp<V1_3::IPreparedModel> mLatestPreparedModel;
};
// Behaves like SampleDriver, except that it produces customized IPreparedModel objects.
@@ -656,31 +675,31 @@ class TestDriver13 : public SampleDriver {
TestDriver13(const std::string& name, Success success)
: SampleDriver(name.c_str()), mSuccess(success) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance =
nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.main.operations.size(), true);
cb(V1_3::ErrorStatus::NONE, supported);
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.operations.size(), true);
cb(V1_0::ErrorStatus::NONE, supported);
@@ -688,39 +707,41 @@ class TestDriver13 : public SampleDriver {
std::vector<bool> supported;
cb(V1_0::ErrorStatus::INVALID_ARGUMENT, supported);
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference, Priority, const OptionalTimePoint&,
- const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference, V1_3::Priority,
+ const V1_3::OptionalTimePoint&, const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&,
const sp<V1_3::IPreparedModelCallback>& callback) override {
callback->notify_1_3(V1_3::ErrorStatus::NONE,
new TestPreparedModel13(model, this, mSuccess));
return V1_3::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const CacheToken&,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference,
+ const hardware::hidl_vec<hardware::hidl_handle>&,
+ const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) override {
callback->notify_1_2(V1_0::ErrorStatus::NONE,
new TestPreparedModel12(nn::convertToV1_3(model), this, mSuccess));
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference,
const sp<V1_0::IPreparedModelCallback>& callback) override {
callback->notify(V1_0::ErrorStatus::NONE,
new TestPreparedModel10(nn::convertToV1_3(model), this, mSuccess));
return V1_0::ErrorStatus::NONE;
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override {
- return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER,
- callback);
+ return prepareModel_1_1(nn::convertToV1_1(model),
+ V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, callback);
}
private:
@@ -732,27 +753,27 @@ class TestDriver11 : public V1_1::IDevice {
public:
TestDriver11(const std::string& name, Success success)
: mLatestDriver(new TestDriver13(name, success)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
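
TestDriver11 is a thin version adapter: it implements the 1.1 interface by forwarding every call to an owned latest-version driver. Stripped of HIDL plumbing, the shape is roughly this (the interfaces and shared_ptr ownership here are simplifications, not the real sp<> and IDevice types):

#include <cassert>
#include <memory>

// Invented stand-ins for the versioned IDevice interfaces.
struct IDeviceLatest {
    virtual ~IDeviceLatest() = default;
    virtual int getStatus() = 0;
};
struct IDeviceOld {
    virtual ~IDeviceOld() = default;
    virtual int getStatus() = 0;
};

struct DriverLatest : IDeviceLatest {
    int getStatus() override { return 13; }  // pretend "latest" status
};

// Old-interface driver that owns a latest driver and forwards to it,
// mirroring TestDriver11 holding a TestDriver13.
struct DriverOld : IDeviceOld {
    std::shared_ptr<IDeviceLatest> mLatestDriver = std::make_shared<DriverLatest>();
    int getStatus() override { return mLatestDriver->getStatus(); }
};

int main() {
    DriverOld old;
    assert(old.getStatus() == 13);
    return 0;
}
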
diff --git a/nn/runtime/test/TestMemoryDomain.cpp b/nn/runtime/test/TestMemoryDomain.cpp
index 06418e5af..35a826ab8 100644
--- a/nn/runtime/test/TestMemoryDomain.cpp
+++ b/nn/runtime/test/TestMemoryDomain.cpp
@@ -34,20 +34,22 @@
#include "TestUtils.h"
using namespace android::nn;
-using namespace hal;
-using Result = test_wrapper::Result;
+namespace hardware = android::hardware;
+using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
+using android::sp;
namespace {
// A test buffer that does nothing.
-class TestBuffer : public IBuffer {
+class TestBuffer : public V1_3::IBuffer {
public:
- Return<ErrorStatus> copyTo(const hidl_memory&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
+ hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory&) override {
+ return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
- Return<ErrorStatus> copyFrom(const hidl_memory&, const hidl_vec<uint32_t>&) override {
- return ErrorStatus::DEVICE_UNAVAILABLE;
+ hardware::Return<V1_3::ErrorStatus> copyFrom(const hardware::hidl_memory&,
+ const hardware::hidl_vec<uint32_t>&) override {
+ return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
}
};
@@ -73,64 +75,67 @@ std::ostream& operator<<(std::ostream& os, AllocateReturn allocateReturn) {
class TestDriverLatest : public sample_driver::SampleDriver {
public:
- TestDriverLatest(const char* name, std::set<OperationType> supportedOperations,
+ TestDriverLatest(const char* name, std::set<V1_3::OperationType> supportedOperations,
AllocateReturn allocateReturn)
: SampleDriver(name),
kSupportedOperations(std::move(supportedOperations)),
kAllocateReturn(allocateReturn) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
        // Faster than CPU.
- const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
- const Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
+ const V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
- cb(ErrorStatus::NONE, capabilities);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
// The tests will never use a referenced model.
CHECK(model.referenced.size() == 0);
std::vector<bool> supported(model.main.operations.size(), false);
- std::transform(
- model.main.operations.begin(), model.main.operations.end(), supported.begin(),
- [this](const Operation& op) { return kSupportedOperations.count(op.type) > 0; });
- cb(ErrorStatus::NONE, supported);
- return Void();
+ std::transform(model.main.operations.begin(), model.main.operations.end(),
+ supported.begin(), [this](const V1_3::Operation& op) {
+ return kSupportedOperations.count(op.type) > 0;
+ });
+ cb(V1_3::ErrorStatus::NONE, supported);
+ return hardware::Void();
}
- Return<void> allocate(const BufferDesc&, const hidl_vec<sp<IPreparedModel>>&,
- const hidl_vec<BufferRole>&, const hidl_vec<BufferRole>&,
- allocate_cb cb) override {
+ hardware::Return<void> allocate(const V1_3::BufferDesc&,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>&,
+ const hardware::hidl_vec<V1_3::BufferRole>&,
+ const hardware::hidl_vec<V1_3::BufferRole>&,
+ allocate_cb cb) override {
switch (kAllocateReturn) {
case AllocateReturn::OK:
- cb(ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::BAD_IBUFFER:
- cb(ErrorStatus::NONE, nullptr, mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, nullptr, mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::BAD_TOKEN:
- cb(ErrorStatus::NONE, new TestBuffer(), 0);
- return Void();
+ cb(V1_3::ErrorStatus::NONE, new TestBuffer(), 0);
+ return hardware::Void();
case AllocateReturn::BAD_STATUS:
- cb(ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
+ return hardware::Void();
case AllocateReturn::NOT_SUPPORTED:
- cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- return Void();
+ cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+ return hardware::Void();
}
LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn);
- return Void();
+ return hardware::Void();
}
private:
- const std::set<OperationType> kSupportedOperations;
+ const std::set<V1_3::OperationType> kSupportedOperations;
const AllocateReturn kAllocateReturn;
uint32_t mValidBufferToken = 1;
};
@@ -160,7 +165,7 @@ void createTestModel(test_wrapper::Model* model) {
model->addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp});
model->addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1});
model->identifyInputsAndOutputs({input0, input1, input2}, {output0, output1});
- EXPECT_EQ(model->finish(), Result::NO_ERROR);
+ EXPECT_EQ(model->finish(), WrapperResult::NO_ERROR);
}
class MemoryDomainTestBase : public ::testing::Test {
@@ -199,14 +204,14 @@ class MemoryDomainTestBase : public ::testing::Test {
std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size());
std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(),
[&deviceMap](const std::string& name) { return deviceMap.at(name); });
- Result result;
+ WrapperResult result;
std::tie(result, compilation) =
test_wrapper::Compilation::createForDevices(&mModel, devices);
- EXPECT_EQ(result, Result::NO_ERROR);
+ EXPECT_EQ(result, WrapperResult::NO_ERROR);
} else {
compilation = test_wrapper::Compilation(&mModel);
}
- EXPECT_EQ(compilation.finish(), Result::NO_ERROR);
+ EXPECT_EQ(compilation.finish(), WrapperResult::NO_ERROR);
return compilation;
}
@@ -245,7 +250,8 @@ class MemoryDomainTest : public MemoryDomainTestBase,
public ::testing::WithParamInterface<MemoryDomainTestParam> {
protected:
// If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED.
- void createAndRegisterDriver(const char* name, std::set<OperationType> supportedOperations,
+ void createAndRegisterDriver(const char* name,
+ std::set<V1_3::OperationType> supportedOperations,
AllocateReturn allocateReturn) {
sp<V1_0::IDevice> driver;
if (kUseV1_2Driver) {
@@ -275,9 +281,10 @@ class MemoryDomainTest : public MemoryDomainTestBase,
// Test device memory allocation on a compilation with only a single partition.
TEST_P(MemoryDomainTest, SinglePartition) {
- createAndRegisterDriver("test_driver",
- {OperationType::ADD, OperationType::SUB, OperationType::MUL},
- kAllocateReturn);
+ createAndRegisterDriver(
+ "test_driver",
+ {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
+ kAllocateReturn);
auto compilation = createCompilation({"test_driver"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -285,7 +292,7 @@ TEST_P(MemoryDomainTest, SinglePartition) {
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
@@ -295,7 +302,7 @@ TEST_P(MemoryDomainTest, SinglePartition) {
} else {
        // The memory should fall back to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -311,9 +318,9 @@ TEST_P(MemoryDomainTest, SinglePartition) {
// Test device memory allocation on a compilation with multiple partitions.
TEST_P(MemoryDomainTest, MultiplePartitions) {
- createAndRegisterDriver("test_driver_add", {OperationType::ADD}, kAllocateReturn);
- createAndRegisterDriver("test_driver_sub", {OperationType::SUB}, kAllocateReturn);
- createAndRegisterDriver("test_driver_mul", {OperationType::MUL}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_add", {V1_3::OperationType::ADD}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_sub", {V1_3::OperationType::SUB}, kAllocateReturn);
+ createAndRegisterDriver("test_driver_mul", {V1_3::OperationType::MUL}, kAllocateReturn);
auto compilation = createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -323,7 +330,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
@@ -333,7 +340,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
} else {
        // The memory should fall back to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -357,7 +364,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
} else {
        // The memory should fall back to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -380,7 +387,7 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
} else {
        // The memory should fall back to ashmem or blob ahwb based on the driver version.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_EQ(m->getIBuffer(), nullptr);
const auto& hidlMemory = m->getHidlMemory();
@@ -396,9 +403,10 @@ TEST_P(MemoryDomainTest, MultiplePartitions) {
// Test device memory allocation with dynamic shape.
TEST_P(MemoryDomainTest, DynamicShape) {
- createAndRegisterDriver("test_driver",
- {OperationType::ADD, OperationType::SUB, OperationType::MUL},
- kAllocateReturn);
+ createAndRegisterDriver(
+ "test_driver",
+ {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
+ kAllocateReturn);
auto compilation = createCompilation({"test_driver"});
ASSERT_NE(compilation.getHandle(), nullptr);
@@ -406,7 +414,7 @@ TEST_P(MemoryDomainTest, DynamicShape) {
if (kAllocateReturn == AllocateReturn::OK) {
// The memory should be backed by the IBuffer returned from the driver.
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
- const Memory* m = reinterpret_cast<const Memory*>(memory.get());
+ const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
ASSERT_NE(m, nullptr);
EXPECT_NE(m->getIBuffer(), nullptr);
} else {
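
TestDriverLatest::allocate above fans out on a test-controlled AllocateReturn so each run can probe one failure mode. A condensed standalone version of that control flow, with the (status, buffer, token) triple reduced to plain fields:

#include <cassert>

enum class AllocateReturn { OK, BAD_IBUFFER, BAD_TOKEN, BAD_STATUS, NOT_SUPPORTED };

struct AllocateResult {
    bool ok;         // stands in for V1_3::ErrorStatus::NONE
    bool hasBuffer;  // stands in for a non-null IBuffer
    unsigned token;  // 0 is the invalid token
};

// Mirrors TestDriverLatest::allocate(): only AllocateReturn::OK yields a
// fully valid (status, buffer, token) triple.
AllocateResult allocate(AllocateReturn mode, unsigned& nextToken) {
    switch (mode) {
        case AllocateReturn::OK:            return {true, true, nextToken++};
        case AllocateReturn::BAD_IBUFFER:   return {true, false, nextToken++};
        case AllocateReturn::BAD_TOKEN:     return {true, true, 0};
        case AllocateReturn::BAD_STATUS:    return {false, true, nextToken++};
        case AllocateReturn::NOT_SUPPORTED: return {false, false, 0};
    }
    return {false, false, 0};
}

int main() {
    unsigned token = 1;
    const auto r = allocate(AllocateReturn::OK, token);
    assert(r.ok && r.hasBuffer && r.token == 1);
    return 0;
}
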
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index d85717ce7..939612a78 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -145,7 +145,11 @@
namespace {
-using namespace android::nn::hal;
+namespace hardware = android::hardware;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = ::android::nn::CompilationBuilder;
using Deadline = ::android::nn::Deadline;
using Device = ::android::nn::Device;
@@ -154,10 +158,13 @@ using ExecutePreference = ::android::nn::test_wrapper::ExecutePreference;
using ExecutePriority = ::android::nn::test_wrapper::ExecutePriority;
using ExecutionPlan = ::android::nn::ExecutionPlan;
using ExecutionStep = ::android::nn::ExecutionStep;
+using HalCacheToken = ::android::nn::HalCacheToken;
using HalVersion = ::android::nn::HalVersion;
using HidlModel = V1_3::Model;
using LogicalStep = ::android::nn::LogicalStep;
using ModelBuilder = ::android::nn::ModelBuilder;
+using Operand = ::android::nn::Operand;
+using Operation = ::android::nn::Operation;
using Result = ::android::nn::test_wrapper::Result;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
using WrapperCompilation = ::android::nn::test_wrapper::Compilation;
@@ -166,9 +173,10 @@ using WrapperModel = ::android::nn::test_wrapper::Model;
using WrapperOperandType = ::android::nn::test_wrapper::OperandType;
using WrapperSymmPerChannelQuantParams = ::android::nn::test_wrapper::SymmPerChannelQuantParams;
using WrapperType = ::android::nn::test_wrapper::Type;
+using android::sp;
-Capabilities makeCapabilities(float perf) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+V1_3::Capabilities makeCapabilities(float perf) {
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
.operandPerformance =
@@ -177,12 +185,12 @@ Capabilities makeCapabilities(float perf) {
.whilePerformance = perfInfo};
};
-void update(Capabilities* capabilities, OperandType type, float perf) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+void update(V1_3::Capabilities* capabilities, V1_3::OperandType type, float perf) {
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
::android::nn::update(&capabilities->operandPerformance, type, perfInfo);
}
-float lookupExecTime(const Capabilities& capabilities, OperandType type) {
+float lookupExecTime(const V1_3::Capabilities& capabilities, V1_3::OperandType type) {
return ::android::nn::lookup(capabilities.operandPerformance, type).execTime;
}
@@ -214,16 +222,16 @@ const uint32_t kFirstEncodingHARD_SWISH = kLastEncodingV1_2 + 1;
const uint32_t kFirstEncodingV1_3 = kFirstEncodingHARD_SWISH;
const uint32_t kLastEncodingV1_3 = kFirstEncodingHARD_SWISH;
-const std::map<OperationType, uint32_t> operationToFirstEncoding = {
- {OperationType::ADD, kFirstEncodingADD},
- {OperationType::MUL, kFirstEncodingMUL},
- {OperationType::DIV, kFirstEncodingDIV},
- {OperationType::SUB, kFirstEncodingSUB},
- {OperationType::MAXIMUM, kFirstEncodingMAXIMUM},
- {OperationType::MINIMUM, kFirstEncodingMINIMUM},
- {OperationType::POW, kFirstEncodingPOW},
- {OperationType::PRELU, kFirstEncodingPRELU},
- {OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH},
+const std::map<V1_3::OperationType, uint32_t> operationToFirstEncoding = {
+ {V1_3::OperationType::ADD, kFirstEncodingADD},
+ {V1_3::OperationType::MUL, kFirstEncodingMUL},
+ {V1_3::OperationType::DIV, kFirstEncodingDIV},
+ {V1_3::OperationType::SUB, kFirstEncodingSUB},
+ {V1_3::OperationType::MAXIMUM, kFirstEncodingMAXIMUM},
+ {V1_3::OperationType::MINIMUM, kFirstEncodingMINIMUM},
+ {V1_3::OperationType::POW, kFirstEncodingPOW},
+ {V1_3::OperationType::PRELU, kFirstEncodingPRELU},
+ {V1_3::OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH},
};
// Sorted in reverse order (std::greater) so that we can use map::lower_bound to
@@ -244,20 +252,20 @@ const std::map<uint32_t, std::pair<uint32_t, bool>, std::greater<>> firstEncodin
// Look up the operation with the specified index in a graph, and return the
// operation encoding; or, if for some reason this is not one of the encoded
// operations, then return kBadOperation.
-uint32_t lookupOperation(std::function<const Operation&(uint32_t)> getOperation,
- std::function<const Operand&(uint32_t)> getOperand,
+uint32_t lookupOperation(std::function<const V1_3::Operation&(uint32_t)> getOperation,
+ std::function<const V1_3::Operand&(uint32_t)> getOperand,
std::function<const uint8_t*(uint32_t)> getValue,
uint32_t operationIndex) {
- const Operation& operation = getOperation(operationIndex);
+ const V1_3::Operation& operation = getOperation(operationIndex);
switch (operation.type) {
- case OperationType::ADD:
- case OperationType::MUL:
- case OperationType::DIV:
- case OperationType::SUB: {
+ case V1_3::OperationType::ADD:
+ case V1_3::OperationType::MUL:
+ case V1_3::OperationType::DIV:
+ case V1_3::OperationType::SUB: {
// input2 is the fused activation function
- const Operand& input2 = getOperand(operation.inputs[2]);
- if ((input2.type == OperandType::INT32) &&
- (input2.lifetime == OperandLifeTime::CONSTANT_COPY)) {
+ const V1_3::Operand& input2 = getOperand(operation.inputs[2]);
+ if ((input2.type == V1_3::OperandType::INT32) &&
+ (input2.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY)) {
int32_t value;
CHECK_EQ(sizeof(value), input2.location.length);
memcpy(&value, getValue(input2.location.offset), input2.location.length);
@@ -276,11 +284,15 @@ uint32_t lookupOperation(std::function<const Operation&(uint32_t)> getOperation,
return kBadOperation;
}
-uint32_t lookupOperation(const HidlModel& model, const Subgraph& subgraph,
+uint32_t lookupOperation(const HidlModel& model, const V1_3::Subgraph& subgraph,
uint32_t operationIndex) {
return lookupOperation(
- [&subgraph](uint32_t index) -> const Operation& { return subgraph.operations[index]; },
- [&subgraph](uint32_t index) -> const Operand& { return subgraph.operands[index]; },
+ [&subgraph](uint32_t index) -> const V1_3::Operation& {
+ return subgraph.operations[index];
+ },
+ [&subgraph](uint32_t index) -> const V1_3::Operand& {
+ return subgraph.operands[index];
+ },
[&model](uint32_t offset) { return &model.operandValues[offset]; }, operationIndex);
}
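
lookupOperation is written in two layers: the core takes accessor callbacks, and the HidlModel/Subgraph overload binds those callbacks with lambdas. A minimal sketch of that indirection, with an invented Operation type:

#include <cassert>
#include <functional>
#include <vector>

struct Operation { int type; };  // invented element type

// Core lookup is written against accessor callbacks...
int lookupType(std::function<const Operation&(unsigned)> getOperation, unsigned index) {
    return getOperation(index).type;
}

// ...and a convenience overload binds the callbacks to a concrete container,
// the way the HidlModel/Subgraph overload above does.
int lookupType(const std::vector<Operation>& ops, unsigned index) {
    return lookupType(
            [&ops](unsigned i) -> const Operation& { return ops[i]; }, index);
}

int main() {
    const std::vector<Operation> ops = {{7}, {42}};
    assert(lookupType(ops, 1) == 42);
    return 0;
}
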
@@ -288,12 +300,11 @@ uint32_t lookupOperation(const HidlModel& model, const Subgraph& subgraph,
// This is a debugging utility function
void dump(const char* name, const ModelBuilder* model) {
const HidlModel hidlModel = model->makeHidlModel();
- std::cout << name << ": " << toString(hidlModel) << std::endl;
- std::cout << "inputs: " << toString(hidlModel.main.inputIndexes) << std::endl;
- std::cout << "outputs: " << toString(hidlModel.main.outputIndexes) << std::endl;
+ std::cout << name << ": " << hidlModel << std::endl;
+ std::cout << "inputs: " << hidlModel.main.inputIndexes << std::endl;
+ std::cout << "outputs: " << hidlModel.main.outputIndexes << std::endl;
for (size_t i = 0, e = hidlModel.main.operations.size(); i < e; i++) {
- std::cout << "operation[" << i << "]: " << toString(hidlModel.main.operations[i])
- << std::endl;
+ std::cout << "operation[" << i << "]: " << hidlModel.main.operations[i] << std::endl;
}
}
#endif
@@ -313,37 +324,39 @@ class PartitioningDriver : public SampleDriver {
OEMYes, // accepted by getSupportedOperations and prepareModel
};
- PartitioningDriver(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriver(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask, OEM oem = OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: SampleDriver(name),
mVersionString(version),
mCapabilities(capabilities),
mOperationMask(operationMask),
mOEM(oem),
mOperationTypes(std::move(operationTypes)) {
- CHECK_EQ(mOperationTypes.count(OperationType::OEM_OPERATION), size_t(0));
+ CHECK_EQ(mOperationTypes.count(V1_3::OperationType::OEM_OPERATION), size_t(0));
if (operationMask) {
- std::for_each(mOperationTypes.begin(), mOperationTypes.end(), [](OperationType type) {
- CHECK_EQ(operationToFirstEncoding.count(type), size_t(0));
- });
+ std::for_each(mOperationTypes.begin(), mOperationTypes.end(),
+ [](V1_3::OperationType type) {
+ CHECK_EQ(operationToFirstEncoding.count(type), size_t(0));
+ });
}
}
~PartitioningDriver() override {}
- Return<void> getVersionString(getVersionString_cb cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb cb) override {
cb(V1_0::ErrorStatus::NONE, mVersionString);
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& callback) override {
if (mOEM == OEMIndecisive) {
for (const auto& operation : model.main.operations) {
- if (operation.type == OperationType::OEM_OPERATION) {
+ if (operation.type == V1_3::OperationType::OEM_OPERATION) {
callback->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
return V1_3::ErrorStatus::INVALID_ARGUMENT;
}
@@ -354,7 +367,7 @@ class PartitioningDriver : public SampleDriver {
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
auto ret = getSupportedOperations_1_3(
model, [&outStatus](V1_3::ErrorStatus inStatus,
- const hidl_vec<bool>& supportedOperations) {
+ const hardware::hidl_vec<bool>& supportedOperations) {
if (inStatus == V1_3::ErrorStatus::NONE) {
if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
[](bool v) { return v; })) {
@@ -371,57 +384,60 @@ class PartitioningDriver : public SampleDriver {
}
}
- Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override {
+ return V1_0::DeviceStatus::AVAILABLE;
+ }
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, mCapabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const Model& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
- return Void();
+ return hardware::Void();
}
cb(V1_3::ErrorStatus::NONE, getSupportedOperationsForSubgraph(model, model.main));
- return Void();
+ return hardware::Void();
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override {
cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1);
- return Void();
+ return hardware::Void();
}
private:
- std::vector<bool> getSupportedOperationsForSubgraph(const Model& model,
- const Subgraph& subgraph) {
+ std::vector<bool> getSupportedOperationsForSubgraph(const V1_3::Model& model,
+ const V1_3::Subgraph& subgraph) {
CHECK(&subgraph == &model.main ||
std::find_if(model.referenced.begin(), model.referenced.end(),
- [&subgraph](const Subgraph& refSubgraph) {
+ [&subgraph](const V1_3::Subgraph& refSubgraph) {
return &subgraph == &refSubgraph;
}) != model.referenced.end());
auto supportsEntireSubgraph = [this, &model, &subgraph](uint32_t refSubgraphOperandIndex) {
CHECK_LT(refSubgraphOperandIndex, subgraph.operands.size());
- const Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex];
- CHECK(refSubgraphOperand.lifetime == OperandLifeTime::SUBGRAPH);
+ const V1_3::Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex];
+ CHECK(refSubgraphOperand.lifetime == V1_3::OperandLifeTime::SUBGRAPH);
CHECK_LT(refSubgraphOperand.location.offset, model.referenced.size());
- const Subgraph& refSubgraph = model.referenced[refSubgraphOperand.location.offset];
+ const V1_3::Subgraph& refSubgraph =
+ model.referenced[refSubgraphOperand.location.offset];
std::vector<bool> supported = getSupportedOperationsForSubgraph(model, refSubgraph);
return std::all_of(supported.begin(), supported.end(), [](bool x) { return x; });
};
const size_t count = subgraph.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; i++) {
- const Operation& operation = subgraph.operations[i];
+ const V1_3::Operation& operation = subgraph.operations[i];
if (mOperationTypes.count(operation.type)) {
- if (operation.type == OperationType::IF) {
+ if (operation.type == V1_3::OperationType::IF) {
namespace op = android::nn::operation_if;
CHECK_GE(operation.inputs.size(), op::kFirstInput);
supported[i] =
supportsEntireSubgraph(operation.inputs[op::kThenModelOperand]) &&
supportsEntireSubgraph(operation.inputs[op::kElseModelOperand]);
- } else if (operation.type == OperationType::WHILE) {
+ } else if (operation.type == V1_3::OperationType::WHILE) {
namespace op = android::nn::operation_while;
CHECK_GE(operation.inputs.size(), op::kFirstInput);
supported[i] =
@@ -432,7 +448,7 @@ class PartitioningDriver : public SampleDriver {
}
continue;
}
- if (operation.type == OperationType::OEM_OPERATION) {
+ if (operation.type == V1_3::OperationType::OEM_OPERATION) {
supported[i] = (mOEM != OEMNo);
continue;
}
@@ -447,72 +463,75 @@ class PartitioningDriver : public SampleDriver {
}
std::string mVersionString;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
uint32_t mOperationMask;
OEM mOEM;
- std::set<OperationType> mOperationTypes;
+ std::set<V1_3::OperationType> mOperationTypes;
};
// Like PartitioningDriver, but implementing 1.2
class PartitioningDriverV1_2 : public V1_2::IDevice {
public:
- PartitioningDriverV1_2(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_2(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -525,33 +544,33 @@ class PartitioningDriverV1_2 : public V1_2::IDevice {
// Like PartitioningDriver, but implementing 1.1
class PartitioningDriverV1_1 : public V1_1::IDevice {
public:
- PartitioningDriverV1_1(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_1(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -564,25 +583,25 @@ class PartitioningDriverV1_1 : public V1_1::IDevice {
// Like PartitioningDriver, but implementing 1.0
class PartitioningDriverV1_0 : public V1_0::IDevice {
public:
- PartitioningDriverV1_0(const char* name, const char* version, Capabilities capabilities,
+ PartitioningDriverV1_0(const char* name, const char* version, V1_3::Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem,
operationTypes)) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
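
PartitioningDriverV1_2, PartitioningDriverV1_1, and PartitioningDriverV1_0 all follow the same pattern: hold the newest driver as an sp<V1_3::IDevice> and expose only the methods the older interface declares, so the runtime sees a genuinely down-level device. A minimal sketch of the shape (class name hypothetical):

// Sketch: an older-HAL facade that delegates to the newest test driver.
class FacadeV1_0 : public V1_0::IDevice {
   public:
    explicit FacadeV1_0(sp<V1_3::IDevice> latest) : mLatest(std::move(latest)) {}
    hardware::Return<void> getCapabilities(getCapabilities_cb cb) override {
        return mLatest->getCapabilities(cb);  // forward unchanged
    }
    // ...the remaining V1_0 methods forward the same way...
   private:
    const sp<V1_3::IDevice> mLatest;
};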
@@ -949,7 +968,7 @@ class PartitioningTest : public ::testing::Test {
// From a vector of DeviceSpecification, create a vector of
// Devices.
struct DeviceSpecification {
- DeviceSpecification(const std::string& name, const Capabilities& capabilities,
+ DeviceSpecification(const std::string& name, const V1_3::Capabilities& capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
: mName(name),
@@ -959,30 +978,31 @@ class PartitioningTest : public ::testing::Test {
mOEM(oem) {}
DeviceSpecification(const std::string& name, float perf, uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, perf, perf, operationMask, oem, operationTypes) {}
DeviceSpecification(const std::string& name, float perf, float perfRelaxed,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, kVersionString, perf, perfRelaxed, operationMask, oem,
operationTypes) {}
DeviceSpecification(const std::string& name, const std::string& version, float perf,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: DeviceSpecification(name, version, perf, perf, operationMask, oem, operationTypes) {}
DeviceSpecification(const std::string& name, const std::string& version, float perf,
float perfRelaxed, uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo,
- std::set<OperationType> operationTypes = {})
+ std::set<V1_3::OperationType> operationTypes = {})
: mName(name),
mVersionString(version),
mOperationMask(operationMask),
mOEM(oem),
mOperationTypes(std::move(operationTypes)) {
- PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
- PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed, .powerUsage = perfRelaxed};
+ V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
+ V1_0::PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed,
+ .powerUsage = perfRelaxed};
mCapabilities = {
.relaxedFloat32toFloat16PerformanceScalar = perfRelaxedInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfRelaxedInfo,
@@ -1004,11 +1024,11 @@ class PartitioningTest : public ::testing::Test {
std::string mName;
std::string mVersionString;
- Capabilities mCapabilities;
+ V1_3::Capabilities mCapabilities;
HalVersion mHalVersion = HalVersion::LATEST;
uint32_t mOperationMask;
PartitioningDriver::OEM mOEM = PartitioningDriver::OEMNo;
- std::set<OperationType> mOperationTypes;
+ std::set<V1_3::OperationType> mOperationTypes;
static constexpr char kVersionString[] = "JUST_AN_EXAMPLE";
@@ -1137,7 +1157,7 @@ class PartitioningTest : public ::testing::Test {
// actual definitions
ASSERT_LT(model->operationCount(), kPseudoDefiningOperationBase);
for (uint32_t i = 0, e = model->operationCount(); i < e; i++) {
- const Operation& operation = model->getOperation(i);
+ const V1_3::Operation& operation = android::nn::convertToV1_3(model->getOperation(i));
for (uint32_t output : operation.outputs) {
(*defMap)[output] = i;
}
@@ -1149,12 +1169,12 @@ class PartitioningTest : public ::testing::Test {
}
// look for NO_VALUE and CONSTANT_COPY
for (uint32_t i = 0, e = model->operandCount(); i < e; i++) {
- const Operand& operand = model->getOperand(i);
+ const V1_3::Operand& operand = android::nn::convertToV1_3(model->getOperand(i));
switch (operand.lifetime) {
- case OperandLifeTime::NO_VALUE:
+ case V1_3::OperandLifeTime::NO_VALUE:
(*defMap)[i] = kPseudoDefiningOperationNoValue;
break;
- case OperandLifeTime::CONSTANT_COPY: {
+ case V1_3::OperandLifeTime::CONSTANT_COPY: {
ASSERT_EQ(operand.location.length, sizeof(uint32_t));
uint32_t value;
memcpy(&value, model->getPointerToOperandValue(operand.location.offset),
@@ -1163,9 +1183,9 @@ class PartitioningTest : public ::testing::Test {
(*defMap)[i] = kPseudoDefiningOperationConstantCopy0 + value;
break;
}
- case OperandLifeTime::TEMPORARY_VARIABLE:
- case OperandLifeTime::SUBGRAPH_INPUT:
- case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case V1_3::OperandLifeTime::TEMPORARY_VARIABLE:
+ case V1_3::OperandLifeTime::SUBGRAPH_INPUT:
+ case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT:
// already handled
break;
default:
@@ -1207,7 +1227,6 @@ class PartitioningTest : public ::testing::Test {
bool compare(const Operand& operandA, const Operand& operandB) {
if (operandA.type != operandB.type || operandA.dimensions != operandB.dimensions ||
- operandA.numberOfConsumers != operandB.numberOfConsumers ||
operandA.scale != operandB.scale || operandA.zeroPoint != operandB.zeroPoint) {
return false;
}
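
Note that compare() keeps the unqualified Operand: it now takes the runtime's canonical operand type, which no longer stores numberOfConsumers (the consumer count is derived from the graph instead), hence the clause dropped above.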
@@ -2021,8 +2040,8 @@ TEST_F(PartitioningTest, Perf) {
// WrapperOperandType is the NeuralNetworksWrapper.h representation of a
// full operand type (WrapperType plus dimensions plus other attributes).
- auto TestType = [](OperandType operandType) {
- if (operandType == OperandType::SUBGRAPH) {
+ auto TestType = [](V1_3::OperandType operandType) {
+ if (operandType == V1_3::OperandType::SUBGRAPH) {
// SUBGRAPH capabilities are handled differently.
return;
}
@@ -2037,11 +2056,11 @@ TEST_F(PartitioningTest, Perf) {
model.finish();
ASSERT_TRUE(model.isValid());
- const Capabilities baseCapabilities = makeCapabilities(0.5);
+ const V1_3::Capabilities baseCapabilities = makeCapabilities(0.5);
{
// better than base
- Capabilities goodCapabilities = baseCapabilities;
+ V1_3::Capabilities goodCapabilities = baseCapabilities;
update(&goodCapabilities, operandType, 0.25);
const auto devices =
@@ -2062,7 +2081,7 @@ TEST_F(PartitioningTest, Perf) {
{
// worse than base
- Capabilities badCapabilities = baseCapabilities;
+ V1_3::Capabilities badCapabilities = baseCapabilities;
update(&badCapabilities, operandType, 0.75);
const auto devices =
makeDevices({{"base", baseCapabilities, ~0U, PartitioningDriver::OEMYes},
@@ -2081,13 +2100,13 @@ TEST_F(PartitioningTest, Perf) {
}
};
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- TestType(static_cast<OperandType>(type));
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ TestType(static_cast<V1_3::OperandType>(type));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- TestType(static_cast<OperandType>(type));
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ TestType(static_cast<V1_3::OperandType>(type));
}
}
@@ -2167,8 +2186,9 @@ void DynamicTemporariesTest::compileModelAndComparePlan() {
ASSERT_TRUE(mModel.has_value());
ASSERT_TRUE(!mCompilation.has_value());
- auto devices = makeDevices({{"fill", 0.9, 0U, PartitioningDriver::OEMNo, {OperationType::FILL}},
- {"add", 0.9, 0U, PartitioningDriver::OEMNo, {OperationType::ADD}}});
+ auto devices =
+ makeDevices({{"fill", 0.9, 0U, PartitioningDriver::OEMNo, {V1_3::OperationType::FILL}},
+ {"add", 0.9, 0U, PartitioningDriver::OEMNo, {V1_3::OperationType::ADD}}});
mCompilation = PartitioningCompilation(&mModel.value(), devices);
ASSERT_EQ(mCompilation->setPartitioning(DeviceManager::kPartitioningWithoutFallback),
@@ -2824,44 +2844,44 @@ class PerfTest : public ::testing::Test {};
TEST_F(PerfTest, Lookup) {
// Derive an arbitrary (but reproducible) performance value from an OperandType.
// We'll use this to ensure that we can save and then recover a type's performance.
- auto typePerf = [](OperandType type) { return float(static_cast<uint32_t>(type)); };
+ auto typePerf = [](V1_3::OperandType type) { return float(static_cast<uint32_t>(type)); };
- Capabilities capabilities = makeCapabilities(-1.0f);
+ V1_3::Capabilities capabilities = makeCapabilities(-1.0f);
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
update(&capabilities, operandType, typePerf(operandType));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
update(&capabilities, operandType, typePerf(operandType));
}
// Make sure lookup retrieves the values stored by update
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
- if (operandType == OperandType::SUBGRAPH) {
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
+ if (operandType == V1_3::OperandType::SUBGRAPH) {
// SUBGRAPH capabilities are handled differently.
continue;
}
SCOPED_TRACE(toString(operandType));
EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType));
}
- for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN);
- type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) {
- OperandType operandType = static_cast<OperandType>(type);
+ for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN);
+ type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) {
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type);
SCOPED_TRACE(toString(operandType));
EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType));
}
// Check the behavior of a missing type
- OperandType operandType =
- static_cast<OperandType>(static_cast<uint32_t>(OperandTypeRange::BASE_MAX) + 1);
+ V1_3::OperandType operandType = static_cast<V1_3::OperandType>(
+ static_cast<uint32_t>(V1_3::OperandTypeRange::BASE_MAX) + 1);
EXPECT_EQ(lookupExecTime(capabilities, operandType), FLT_MAX);
}
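
For reference, the makeCapabilities(perf) helper exercised by this test plausibly has the shape below, mirroring the capabilities literal that TestDriver builds later in this change (a sketch under that assumption, not the actual helper):

// Sketch: uniform performance across every capabilities field.
V1_3::Capabilities makeCapabilitiesSketch(float perf) {
    const V1_0::PerformanceInfo info = {.execTime = perf, .powerUsage = perf};
    return {.relaxedFloat32toFloat16PerformanceScalar = info,
            .relaxedFloat32toFloat16PerformanceTensor = info,
            .operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(info),
            .ifPerformance = info,
            .whilePerformance = info};
}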
@@ -3005,7 +3025,7 @@ TEST_F(ControlFlowPartitioningTest, IF_SimplePlan) {
// The device supports all operations.
const auto devices =
- makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}});
+ makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {V1_3::OperationType::IF}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3023,7 +3043,7 @@ TEST_F(ControlFlowPartitioningTest, WHILE_SimplePlan) {
0.9,
~0U,
PartitioningDriver::OEMNo,
- {OperationType::WHILE, OperationType::EQUAL}}});
+ {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3047,7 +3067,7 @@ void ControlFlowPartitioningTest::testIfUnknownSize(Dimensioned dimensionedMain,
// The device supports all operations but the partitioner ignores its IF
// support due to http://b/159076604#comment5.
const auto devices =
- makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}});
+ makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {V1_3::OperationType::IF}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
@@ -3090,7 +3110,7 @@ void ControlFlowPartitioningTest::testWhileUnknownSize(Dimensioned dimensionedMa
0.9,
~0U,
PartitioningDriver::OEMNo,
- {OperationType::WHILE, OperationType::EQUAL}}});
+ {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}});
ExecutionPlan plan;
ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER,
diff --git a/nn/runtime/test/TestPartitioningRandom.cpp b/nn/runtime/test/TestPartitioningRandom.cpp
index 51d7910cc..294d93ad5 100644
--- a/nn/runtime/test/TestPartitioningRandom.cpp
+++ b/nn/runtime/test/TestPartitioningRandom.cpp
@@ -95,11 +95,15 @@
namespace android {
-using namespace nn::hal;
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
using CompilationBuilder = nn::CompilationBuilder;
-using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
+using Device = nn::Device;
using ExecutionPlan = nn::ExecutionPlan;
+using HalCacheToken = nn::HalCacheToken;
using HalVersion = nn::HalVersion;
using HidlModel = V1_3::Model;
using ModelBuilder = nn::ModelBuilder;
@@ -335,7 +339,7 @@ class RandomPartitioningTest : public ::testing::TestWithParam<unsigned> {
public:
RandomPartitioningTest() : mRandNumEng(GetParam() /* seed */), mRandNumUnitDist(0.0, 1.0) {}
- static Signature getSignature(const HidlModel& model, const Operation& operation);
+ static Signature getSignature(const HidlModel& model, const V1_3::Operation& operation);
protected:
static V1_0::IDevice* makeTestDriver(HalVersion version, const char* name,
@@ -500,7 +504,8 @@ HalVersion RandomPartitioningTest::getMinHalVersion(ANeuralNetworksOperationType
return kOperationToVersion.at(type);
}
-Signature RandomPartitioningTest::getSignature(const HidlModel& model, const Operation& operation) {
+Signature RandomPartitioningTest::getSignature(const HidlModel& model,
+ const V1_3::Operation& operation) {
static const auto kOperationToActivation = [] {
std::map<ANeuralNetworksOperationType, int> result;
for (const auto& pattern : kOperationPatterns) {
@@ -516,9 +521,10 @@ Signature RandomPartitioningTest::getSignature(const HidlModel& model, const Ope
return Signature(operationType, -1);
}
- const Operand& operand = model.main.operands[operation.inputs[activationFunctionInputIndex]];
- CHECK(operand.lifetime == OperandLifeTime::CONSTANT_COPY);
- CHECK(operand.type == OperandType::INT32);
+ const V1_3::Operand& operand =
+ model.main.operands[operation.inputs[activationFunctionInputIndex]];
+ CHECK(operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY);
+ CHECK(operand.type == V1_3::OperandType::INT32);
int32_t value;
memcpy(&value, &model.operandValues[operand.location.offset], operand.location.length);
return Signature(operationType, value);
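
The memcpy idiom above, indexing model.operandValues by the operand's location offset and length, generalizes to any CONSTANT_COPY scalar; a hypothetical helper makes the pattern explicit:

// Hypothetical helper: read a CONSTANT_COPY scalar out of a V1_3 model.
template <typename T>
T readConstantScalar(const HidlModel& model, const V1_3::Operand& operand) {
    CHECK(operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY);
    CHECK_EQ(operand.location.length, sizeof(T));
    T value;
    memcpy(&value, &model.operandValues[operand.location.offset], sizeof(T));
    return value;
}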
@@ -546,21 +552,21 @@ class TestDriver : public SampleDriver {
TestDriver(const char* name, std::set<Signature> signatures)
: SampleDriver(name), mSignatures(std::move(signatures)) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
- const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
- Capabilities capabilities = {
+ const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
_hidl_cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
- Return<void> getSupportedOperations_1_3(const HidlModel& model,
- getSupportedOperations_1_3_cb cb) override {
+ hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
@@ -572,19 +578,20 @@ class TestDriver : public SampleDriver {
} else {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
}
- return Void();
+ return hardware::Void();
}
- Return<V1_3::ErrorStatus> prepareModel_1_3(
- const HidlModel& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ hardware::Return<V1_3::ErrorStatus> prepareModel_1_3(
+ const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_3::IPreparedModelCallback>& callback) override {
// NOTE: We verify that all operations in the model are supported.
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT;
auto ret = getSupportedOperations_1_3(
model, [&outStatus](V1_3::ErrorStatus inStatus,
- const hidl_vec<bool>& supportedOperations) {
+ const hardware::hidl_vec<bool>& supportedOperations) {
if (inStatus == V1_3::ErrorStatus::NONE) {
if (std::all_of(supportedOperations.begin(), supportedOperations.end(),
[](bool v) { return v; })) {
@@ -610,57 +617,60 @@ class TestDriverV1_2 : public V1_2::IDevice {
public:
TestDriverV1_2(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_2(_hidl_cb);
}
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_2(
+ const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_2(
- const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
const sp<V1_2::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
actualCallback);
}
- Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
return mLatestDriver->getVersionString(_hidl_cb);
}
- Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
- Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ hardware::Return<void> getType(getType_cb _hidl_cb) override {
+ return mLatestDriver->getType(_hidl_cb);
+ }
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
return mLatestDriver->getSupportedExtensions(_hidl_cb);
}
- Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ hardware::Return<V1_0::ErrorStatus> prepareModelFromCache(
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -675,27 +685,27 @@ class TestDriverV1_1 : public V1_1::IDevice {
public:
TestDriverV1_1(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
@@ -710,19 +720,19 @@ class TestDriverV1_0 : public V1_0::IDevice {
public:
TestDriverV1_0(const char* name, std::set<Signature> signatures)
: mLatestDriver(new TestDriver(name, std::move(signatures))) {}
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mLatestDriver->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
const sp<V1_3::IDevice> mLatestDriver;
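
A shape that recurs in every driver this change touches: HIDL methods deliver their payload through the generated _hidl_cb callback, and the return value carries only transport status, which is why each body now ends in hardware::Void() instead of the formerly unqualified Void(). In isolation:

// The payload goes to the callback; hardware::Return<void> is transport status only.
hardware::Return<void> getVersionString(getVersionString_cb cb) override {
    cb(V1_0::ErrorStatus::NONE, "test-driver");  // placeholder version string
    return hardware::Void();
}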
diff --git a/nn/runtime/test/TestRemoveDefaultArguments.cpp b/nn/runtime/test/TestRemoveDefaultArguments.cpp
index 8726adc85..daef6bf60 100644
--- a/nn/runtime/test/TestRemoveDefaultArguments.cpp
+++ b/nn/runtime/test/TestRemoveDefaultArguments.cpp
@@ -98,7 +98,6 @@ const test_helper::TestModel& get_test_model_align_corners_2x2_to_1x1();
namespace android::nn {
namespace {
-using namespace hal;
using sample_driver::SampleDriverPartial;
using Result = test_wrapper::Result;
using WrapperOperandType = test_wrapper::OperandType;
@@ -113,18 +112,18 @@ class TestDriver : public SampleDriverPartial {
public:
TestDriver() : SampleDriverPartial(kTestDriverName) {}
- Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
- return Void();
+ return hardware::Void();
}
void setSupportedInputCount(uint32_t count) { mSupportedInputCount = count; }
private:
- std::vector<bool> getSupportedOperationsImpl(const Model& model) const override {
+ std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override {
std::vector<bool> supported(model.main.operations.size());
std::transform(model.main.operations.begin(), model.main.operations.end(),
- supported.begin(), [this](const Operation& operation) {
+ supported.begin(), [this](const V1_3::Operation& operation) {
SCOPED_TRACE("operation = " + toString(operation.type));
EXPECT_EQ(operation.inputs.size(), mSupportedInputCount);
return operation.inputs.size() == mSupportedInputCount;
diff --git a/nn/runtime/test/TestUnspecifiedDimensions.cpp b/nn/runtime/test/TestUnspecifiedDimensions.cpp
index c1bad04a8..5a2287c78 100644
--- a/nn/runtime/test/TestUnspecifiedDimensions.cpp
+++ b/nn/runtime/test/TestUnspecifiedDimensions.cpp
@@ -17,7 +17,10 @@
#include "TestNeuralNetworksWrapper.h"
#include <sys/mman.h>
+#include <memory>
+#include <string>
#include <tuple>
+#include <utility>
#include <vector>
#include <android-base/macros.h>
diff --git a/nn/runtime/test/TestVersionedInterfaces.cpp b/nn/runtime/test/TestVersionedInterfaces.cpp
index 6d1306d57..b4f32bcde 100644
--- a/nn/runtime/test/TestVersionedInterfaces.cpp
+++ b/nn/runtime/test/TestVersionedInterfaces.cpp
@@ -22,6 +22,7 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <hidl/Status.h>
+#include <nnapi/TypeUtils.h>
#include <utils/Errors.h>
#include <limits>
@@ -37,7 +38,6 @@
namespace android::nn {
namespace {
-using namespace hal;
using testing::_;
using testing::Invoke;
using testing::InvokeWithoutArgs;
@@ -45,40 +45,59 @@ using testing::MockFunction;
using MockDeviceFactory = MockFunction<sp<V1_0::IDevice>(bool blocking)>;
constexpr uint32_t kNoCacheFilesNeeded = 0;
-constexpr uint32_t kMaxNumberOfCacheFiles =
- static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES);
-constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
- .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr V1_2::Timing kNoTiming12 = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
+ .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+constexpr Timing kNoTiming = {};
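
Two "no timing" spellings now coexist: the V1_2 HAL marks an unmeasured execution by setting both fields to UINT64_MAX (kNoTiming12), while the canonical Timing introduced alongside it treats a default-constructed value (kNoTiming) as "not measured".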
template <typename... Args>
auto makeCallbackReturn(Args&&... args) {
return [argPack = std::make_tuple(std::forward<Args>(args)...)](const auto& cb) {
std::apply(cb, argPack);
- return Void();
+ return hardware::Void();
};
};
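
makeCallbackReturn packs the expected results into a tuple and replays them into whatever completion callback the mocked method receives; typical wiring, as in MockDevice::create() below (a sketch of the usage, not new test logic):

// Replay canned results through a HIDL completion callback as a gMock default action.
const auto versionString_ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, "Google-MockV1");
ON_CALL(*mockDevice, getVersionString(_)).WillByDefault(Invoke(versionString_ret));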
-class MockDevice : public IDevice {
+class MockDevice : public V1_3::IDevice {
public:
static sp<MockDevice> create() {
const sp<MockDevice> mockDevice = new MockDevice();
- const auto linkToDeathRet_ret = []() -> Return<bool> { return true; };
- const auto getCapabilities_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_0::Capabilities{});
+ const auto linkToDeathRet_ret = []() -> hardware::Return<bool> { return true; };
+ const auto getCapabilities_ret = makeCallbackReturn(
+ V1_0::ErrorStatus::NONE, V1_0::Capabilities{
+ .float32Performance = kNoPerformanceInfo,
+ .quantized8Performance = kNoPerformanceInfo,
+ });
const auto getCapabilities_1_1_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_1::Capabilities{});
+ makeCallbackReturn(V1_0::ErrorStatus::NONE,
+ V1_1::Capabilities{
+ .float32Performance = kNoPerformanceInfo,
+ .quantized8Performance = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16Performance = kNoPerformanceInfo,
+ });
const auto getVersionString_ret =
makeCallbackReturn(V1_0::ErrorStatus::NONE, "Google-MockV1");
- const auto getType_ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, DeviceType::OTHER);
- const auto getCapabilities_1_2_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::Capabilities{});
+ const auto getType_ret =
+ makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::DeviceType::OTHER);
+ const auto getCapabilities_1_2_ret = makeCallbackReturn(
+ V1_0::ErrorStatus::NONE,
+ V1_2::Capabilities{
+ .relaxedFloat32toFloat16PerformanceScalar = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = kNoPerformanceInfo,
+ });
const auto getSupportedExtensions_ret =
- makeCallbackReturn(V1_0::ErrorStatus::NONE, hidl_vec<Extension>{});
+ makeCallbackReturn(V1_0::ErrorStatus::NONE, hardware::hidl_vec<V1_2::Extension>{});
const auto getNumberOfCacheFilesNeeded_ret = makeCallbackReturn(
V1_0::ErrorStatus::NONE, kMaxNumberOfCacheFiles, kMaxNumberOfCacheFiles);
- const auto getCapabilities_1_3_ret =
- makeCallbackReturn(V1_3::ErrorStatus::NONE, V1_3::Capabilities{});
+ const auto getCapabilities_1_3_ret = makeCallbackReturn(
+ V1_3::ErrorStatus::NONE,
+ V1_3::Capabilities{
+ .relaxedFloat32toFloat16PerformanceScalar = kNoPerformanceInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = kNoPerformanceInfo,
+ .ifPerformance = kNoPerformanceInfo,
+ .whilePerformance = kNoPerformanceInfo,
+ });
ON_CALL(*mockDevice, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret));
ON_CALL(*mockDevice, getCapabilities(_)).WillByDefault(Invoke(getCapabilities_ret));
@@ -108,73 +127,82 @@ class MockDevice : public IDevice {
}
// IBase methods below.
- Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient,
- uint64_t /*cookie*/) override {
+ hardware::Return<bool> linkToDeath(const sp<hardware::hidl_death_recipient>& recipient,
+ uint64_t /*cookie*/) override {
mDeathRecipient = recipient;
return linkToDeathRet();
}
- MOCK_METHOD(Return<void>, ping, (), (override));
+ MOCK_METHOD(hardware::Return<void>, ping, (), (override));
// V1_0 methods below.
- MOCK_METHOD(Return<void>, getCapabilities, (getCapabilities_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities, (getCapabilities_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations,
(const V1_0::Model& model, getSupportedOperations_cb cb), (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel,
(const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<DeviceStatus>, getStatus, (), (override));
+ MOCK_METHOD(hardware::Return<V1_0::DeviceStatus>, getStatus, (), (override));
// V1_1 methods below.
- MOCK_METHOD(Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_1,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_1,
(const V1_1::Model& model, getSupportedOperations_1_1_cb cb), (override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_1,
- (const V1_1::Model& model, ExecutionPreference preference,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel_1_1,
+ (const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& callback),
(override));
// V1_2 methods below.
- MOCK_METHOD(Return<void>, getVersionString, (getVersionString_cb cb), (override));
- MOCK_METHOD(Return<void>, getType, (getType_cb cb), (override));
- MOCK_METHOD(Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_2,
- (const V1_2::Model& model, getSupportedOperations_1_2_cb cb), (override));
- MOCK_METHOD(Return<void>, getNumberOfCacheFilesNeeded, (getNumberOfCacheFilesNeeded_cb cb),
+ MOCK_METHOD(hardware::Return<void>, getVersionString, (getVersionString_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getType, (getType_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb),
(override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_2,
- (const V1_2::Model& model, ExecutionPreference preference,
- const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_2,
+ (const V1_2::Model& model, getSupportedOperations_1_2_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getNumberOfCacheFilesNeeded,
+ (getNumberOfCacheFilesNeeded_cb cb), (override));
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModel_1_2,
+ (const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModelFromCache,
- (const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, prepareModelFromCache,
+ (const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback),
(override));
// V1_3 methods below.
- MOCK_METHOD(Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb), (override));
- MOCK_METHOD(Return<void>, getSupportedOperations_1_3,
+ MOCK_METHOD(hardware::Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb),
+ (override));
+ MOCK_METHOD(hardware::Return<void>, getSupportedOperations_1_3,
(const V1_3::Model& model, getSupportedOperations_1_3_cb cb), (override));
- MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModel_1_3,
- (const V1_3::Model& model, ExecutionPreference preference, Priority priority,
- const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<V1_3::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, prepareModel_1_3,
+ (const V1_3::Model& model, V1_1::ExecutionPreference preference,
+ V1_3::Priority priority, const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3,
- (const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
- const sp<V1_3::IPreparedModelCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3,
+ (const V1_3::OptionalTimePoint& deadline,
+ const hardware::hidl_vec<hardware::hidl_handle>& modelCache,
+ const hardware::hidl_vec<hardware::hidl_handle>& dataCache,
+ const HalCacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, allocate,
- (const BufferDesc& desc, const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
- const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles,
- allocate_cb cb),
+ MOCK_METHOD(hardware::Return<void>, allocate,
+ (const V1_3::BufferDesc& desc,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hardware::hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb),
(override));
// Helper methods.
- MOCK_METHOD(Return<bool>, linkToDeathRet, ());
+ MOCK_METHOD(hardware::Return<bool>, linkToDeathRet, ());
void simulateCrash() {
ASSERT_NE(nullptr, mDeathRecipient.get());
@@ -189,15 +217,15 @@ class MockDevice : public IDevice {
private:
// Members.
- sp<hidl_death_recipient> mDeathRecipient;
+ sp<hardware::hidl_death_recipient> mDeathRecipient;
};
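
Both mocks capture the recipient in linkToDeath() so that simulateCrash() can fire it on demand; the elided body presumably reduces to a direct invocation along these lines (argument values assumed):

// Emulate driver death by invoking the stored death recipient directly.
mDeathRecipient->serviceDied(/*cookie=*/0, /*who=*/nullptr);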
-class MockPreparedModel : public IPreparedModel {
+class MockPreparedModel : public V1_3::IPreparedModel {
public:
static sp<MockPreparedModel> create() {
const sp<MockPreparedModel> mockPreparedModel = new MockPreparedModel();
- const auto linkToDeathRet_ret = []() -> Return<bool> { return true; };
+ const auto linkToDeathRet_ret = []() -> hardware::Return<bool> { return true; };
ON_CALL(*mockPreparedModel, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret));
// These EXPECT_CALL(...).Times(testing::AnyNumber()) calls are to
@@ -208,27 +236,28 @@ class MockPreparedModel : public IPreparedModel {
}
// IBase methods below.
- Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient,
- uint64_t /*cookie*/) override {
+ hardware::Return<bool> linkToDeath(const sp<hardware::hidl_death_recipient>& recipient,
+ uint64_t /*cookie*/) override {
mDeathRecipient = recipient;
return linkToDeathRet();
}
- MOCK_METHOD(Return<void>, ping, (), (override));
+ MOCK_METHOD(hardware::Return<void>, ping, (), (override));
// V1_0 methods below.
- MOCK_METHOD(Return<V1_0::ErrorStatus>, execute,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, execute,
(const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback),
(override));
// V1_2 methods below.
- MOCK_METHOD(Return<V1_0::ErrorStatus>, execute_1_2,
- (const V1_0::Request& request, MeasureTiming measure,
+ MOCK_METHOD(hardware::Return<V1_0::ErrorStatus>, execute_1_2,
+ (const V1_0::Request& request, V1_2::MeasureTiming measure,
const sp<V1_2::IExecutionCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, executeSynchronously,
- (const V1_0::Request& request, MeasureTiming measure, executeSynchronously_cb cb),
+ MOCK_METHOD(hardware::Return<void>, executeSynchronously,
+ (const V1_0::Request& request, V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb),
(override));
- MOCK_METHOD(Return<void>, configureExecutionBurst,
+ MOCK_METHOD(hardware::Return<void>, configureExecutionBurst,
(const sp<V1_2::IBurstCallback>& callback,
const hardware::MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
@@ -236,27 +265,28 @@ class MockPreparedModel : public IPreparedModel {
(override));
// V1_3 methods below.
- MOCK_METHOD(Return<ErrorStatus>, execute_1_3,
- (const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const sp<IExecutionCallback>& callback),
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, execute_1_3,
+ (const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback),
(override));
- MOCK_METHOD(Return<void>, executeSynchronously_1_3,
- (const V1_3::Request& request, MeasureTiming measure,
- const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
+ MOCK_METHOD(hardware::Return<void>, executeSynchronously_1_3,
+ (const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
executeSynchronously_1_3_cb cb),
(override));
- MOCK_METHOD(Return<void>, executeFenced,
- (const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor,
- MeasureTiming measure, const OptionalTimePoint& deadline,
- const OptionalTimeoutDuration& loopTimeoutDuration,
- const OptionalTimeoutDuration& duration, executeFenced_cb cb),
+ MOCK_METHOD(hardware::Return<void>, executeFenced,
+ (const V1_3::Request& request,
+ const hardware::hidl_vec<hardware::hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb),
(override));
// Helper methods.
- MOCK_METHOD(Return<bool>, linkToDeathRet, ());
+ MOCK_METHOD(hardware::Return<bool>, linkToDeathRet, ());
void simulateCrash() {
ASSERT_NE(nullptr, mDeathRecipient.get());
@@ -271,27 +301,29 @@ class MockPreparedModel : public IPreparedModel {
private:
// Members.
- sp<hidl_death_recipient> mDeathRecipient;
+ sp<hardware::hidl_death_recipient> mDeathRecipient;
};
class MockBurstContext : public V1_2::IBurstContext {
public:
// V1_2 methods below.
- MOCK_METHOD(Return<void>, freeMemory, (int32_t slot), (override));
+ MOCK_METHOD(hardware::Return<void>, freeMemory, (int32_t slot), (override));
};
-class MockFencedExecutionCallback : public IFencedExecutionCallback {
+class MockFencedExecutionCallback : public V1_3::IFencedExecutionCallback {
public:
// V1_3 methods below.
- MOCK_METHOD(Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override));
+ MOCK_METHOD(hardware::Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override));
};
-class MockBuffer : public IBuffer {
+class MockBuffer : public V1_3::IBuffer {
public:
// V1_3 methods below.
- MOCK_METHOD(Return<ErrorStatus>, copyTo, (const hidl_memory& dst), (override));
- MOCK_METHOD(Return<ErrorStatus>, copyFrom,
- (const hidl_memory& src, const hidl_vec<uint32_t>& dimensions), (override));
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, copyTo, (const hardware::hidl_memory& dst),
+ (override));
+ MOCK_METHOD(hardware::Return<V1_3::ErrorStatus>, copyFrom,
+ (const hardware::hidl_memory& src, const hardware::hidl_vec<uint32_t>& dimensions),
+ (override));
};
enum class Version { V1_0, V1_1, V1_2, V1_3, MOCK };
@@ -315,18 +347,19 @@ sp<V1_0::IDevice> adaptAs(const sp<MockDevice>& mockDevice, Version version) {
auto makePreparedModelReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
- return [launchStatus, returnStatus, preparedModel](
- const V1_0::Model& /*model*/,
- const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ return [launchStatus, returnStatus, preparedModel](const V1_0::Model& /*model*/,
+ const sp<V1_0::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus, preparedModel).isOk();
return launchStatus;
};
}
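
These factory functions slot directly into gMock expectations; a hypothetical wiring for the V1_0 path:

// Program the mock device to report a successful model preparation.
const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create();
EXPECT_CALL(*mockDevice, prepareModel(_, _))
        .WillOnce(Invoke(makePreparedModelReturn(V1_0::ErrorStatus::NONE,
                                                 V1_0::ErrorStatus::NONE, mockPreparedModel)));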
auto makePreparedModel_1_1Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
- return [launchStatus, returnStatus, preparedModel](
- const V1_1::Model& /*model*/, ExecutionPreference /*preference*/,
- const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ return [launchStatus, returnStatus, preparedModel](const V1_1::Model& /*model*/,
+ V1_1::ExecutionPreference /*preference*/,
+ const sp<V1_0::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -334,9 +367,10 @@ auto makePreparedModel_1_1Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStat
auto makePreparedModel_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
- const V1_2::Model& /*model*/, ExecutionPreference /*preference*/,
+ const V1_2::Model& /*model*/, V1_1::ExecutionPreference /*preference*/,
const auto& /*modelCache*/, const auto& /*dataCache*/, const auto& /*token*/,
- const sp<V1_2::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const sp<V1_2::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_0::ErrorStatus> {
cb->notify_1_2(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -344,11 +378,12 @@ auto makePreparedModel_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStat
auto makePreparedModel_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus,
const sp<MockPreparedModel>& preparedModel) {
return [launchStatus, returnStatus, preparedModel](
- const V1_3::Model& /*model*/, ExecutionPreference /*preference*/,
- Priority /*priority*/, const OptionalTimePoint& /*deadline*/,
- const hidl_vec<hidl_handle>& /*modelCache*/,
- const hidl_vec<hidl_handle>& /*dataCache*/, const CacheToken& /*token*/,
- const sp<V1_3::IPreparedModelCallback>& cb) -> Return<V1_3::ErrorStatus> {
+ const V1_3::Model& /*model*/, V1_1::ExecutionPreference /*preference*/,
+ V1_3::Priority /*priority*/, const V1_3::OptionalTimePoint& /*deadline*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*modelCache*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*dataCache*/,
+ const HalCacheToken& /*token*/, const sp<V1_3::IPreparedModelCallback>& cb)
+ -> hardware::Return<V1_3::ErrorStatus> {
cb->notify_1_3(returnStatus, preparedModel).isOk();
return launchStatus;
};
@@ -357,51 +392,53 @@ auto makePreparedModel_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStat
auto makeExecuteReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus) {
return [launchStatus, returnStatus](
const V1_0::Request& /*request*/,
- const sp<V1_0::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const sp<V1_0::IExecutionCallback>& cb) -> hardware::Return<V1_0::ErrorStatus> {
cb->notify(returnStatus);
return launchStatus;
};
}
auto makeExecute_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus,
- const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [launchStatus, returnStatus, outputShapes, timing](
- const V1_0::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const sp<V1_2::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> {
+ const V1_0::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const sp<V1_2::IExecutionCallback>& cb) -> hardware::Return<V1_0::ErrorStatus> {
cb->notify_1_2(returnStatus, outputShapes, timing);
return launchStatus;
};
}
auto makeExecute_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus,
- const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [launchStatus, returnStatus, outputShapes, timing](
- const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
- const sp<V1_3::IExecutionCallback>& cb) -> Return<V1_3::ErrorStatus> {
+ const V1_3::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const sp<V1_3::IExecutionCallback>& cb) -> hardware::Return<V1_3::ErrorStatus> {
cb->notify_1_3(returnStatus, outputShapes, timing);
return launchStatus;
};
}
auto makeExecuteSynchronouslyReturn(V1_0::ErrorStatus status,
- const std::vector<OutputShape>& outputShapes,
- const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [status, outputShapes, timing](const V1_0::Request& /*request*/,
- MeasureTiming /*measureTiming*/,
+ V1_2::MeasureTiming /*measureTiming*/,
const V1_2::IPreparedModel::executeSynchronously_cb& cb) {
cb(status, outputShapes, timing);
- return Void();
+ return hardware::Void();
};
}
auto makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus status,
- const std::vector<OutputShape>& outputShapes,
- const Timing& timing) {
+ const std::vector<V1_2::OutputShape>& outputShapes,
+ const V1_2::Timing& timing) {
return [status, outputShapes, timing](
- const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/,
- const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const V1_3::Request& /*request*/, V1_2::MeasureTiming /*measureTiming*/,
+ const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
const V1_3::IPreparedModel::executeSynchronously_1_3_cb& cb) {
cb(status, outputShapes, timing);
- return Void();
+ return hardware::Void();
};
}
auto makeConfigureExecutionBurst(V1_0::ErrorStatus status,
@@ -412,19 +449,20 @@ auto makeConfigureExecutionBurst(V1_0::ErrorStatus status,
const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/,
V1_2::IPreparedModel::configureExecutionBurst_cb cb) {
cb(status, burstContext);
- return Void();
+ return hardware::Void();
};
}
-auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hidl_handle& syncFence,
- const sp<IFencedExecutionCallback>& dispatchCallback) {
+auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hardware::hidl_handle& syncFence,
+ const sp<V1_3::IFencedExecutionCallback>& dispatchCallback) {
return [status, syncFence, dispatchCallback](
- const V1_3::Request& /*request*/, const hidl_vec<hidl_handle>& /*waitFor*/,
- MeasureTiming /*measure*/, const OptionalTimePoint& /*deadline*/,
- const OptionalTimeoutDuration& /*loopTimeoutDuration*/,
- const OptionalTimeoutDuration& /*duration*/,
+ const V1_3::Request& /*request*/,
+ const hardware::hidl_vec<hardware::hidl_handle>& /*waitFor*/,
+ V1_2::MeasureTiming /*measure*/, const V1_3::OptionalTimePoint& /*deadline*/,
+ const V1_3::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+ const V1_3::OptionalTimeoutDuration& /*duration*/,
V1_3::IPreparedModel::executeFenced_cb cb) {
cb(status, syncFence, dispatchCallback);
- return Void();
+ return hardware::Void();
};
}
@@ -516,7 +554,7 @@ std::shared_ptr<VersionedIDevice> makeVersionedIDeviceFrom(const sp<MockDevice>&
const auto device = adaptAs(mockDevice, version);
ON_CALL(*mockDeviceFactory, Call(_)).WillByDefault(testing::Return(device));
EXPECT_CALL(*mockDeviceFactory, Call(/*blocking=*/true)).Times(testing::AtLeast(1));
- const DeviceFactory makeDevice = mockDeviceFactory->AsStdFunction();
+ const HalDeviceFactory makeDevice = mockDeviceFactory->AsStdFunction();
return VersionedIDevice::create("MockDevice", makeDevice);
}
@@ -566,7 +604,7 @@ class VersionedIDeviceMockTest : public VersionedIDeviceInitializedTest<Version:
TEST_F(VersionedIDeviceInitializationTest, creationFailure) {
// setup failure
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(nullptr));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -581,7 +619,7 @@ TEST_F(VersionedIDeviceInitializationTest, linkToDeathTransportFailure) {
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -592,10 +630,10 @@ TEST_F(VersionedIDeviceInitializationTest, linkToDeathTransportFailure) {
TEST_F(VersionedIDeviceInitializationTest, linkToDeathReturnError) {
// setup failure
- const auto ret = []() -> Return<bool> { return false; };
+ const auto ret = []() -> hardware::Return<bool> { return false; };
EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice));
EXPECT_CALL(*kMockDevice, linkToDeathRet()).Times(1).WillOnce(InvokeWithoutArgs(ret));
- const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
+ const HalDeviceFactory makeDevice = kMockMakeDevice->AsStdFunction();
// run test
const auto device = VersionedIDevice::create("MockDevice", makeDevice);
@@ -666,7 +704,8 @@ TEST_F(VersionedIDeviceInitializationTest, getVersionStringFailure) {
TEST_F(VersionedIDeviceInitializationTest, getTypeFailure) {
// setup failure
- const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, DeviceType::OTHER);
+ const auto ret =
+ makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_2::DeviceType::OTHER);
EXPECT_CALL(*kMockDevice, getType(_)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -678,7 +717,8 @@ TEST_F(VersionedIDeviceInitializationTest, getTypeFailure) {
TEST_F(VersionedIDeviceInitializationTest, getSupportedExtensionsFailure) {
// setup failure
- const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, hidl_vec<Extension>{});
+ const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE,
+ hardware::hidl_vec<V1_2::Extension>{});
EXPECT_CALL(*kMockDevice, getSupportedExtensions(_)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -839,9 +879,11 @@ TEST_F(VersionedIDeviceV1_0Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_LT(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_LT(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -851,9 +893,11 @@ TEST_F(VersionedIDeviceV1_1Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_LT(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_LT(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -863,9 +907,11 @@ TEST_F(VersionedIDeviceV1_2Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_EQ(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_EQ(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
@@ -875,9 +921,11 @@ TEST_F(VersionedIDeviceV1_3Test, getCapabilities) {
const auto cached = kDevice->getCapabilities();
// verify success
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar);
- EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor);
- EXPECT_EQ(0u, capabilities.operandPerformance.size());
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceScalar);
+ EXPECT_EQ(Capabilities::PerformanceInfo{},
+ capabilities.relaxedFloat32toFloat16PerformanceTensor);
+ EXPECT_EQ(0u, capabilities.operandPerformance.asVector().size());
EXPECT_EQ(cached, capabilities);
}
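
These assertions now spell out the canonical Capabilities shape. A stand-in sketch of that shape as the tests above rely on it (simplified and assumed, not the real definition in the runtime's nnapi headers): PerformanceInfo is nested inside Capabilities, and operandPerformance is a table type queried through asVector() rather than a plain hidl_vec.

    #include <vector>

    struct Capabilities {
        struct PerformanceInfo {
            float execTime = 0.0f;
            float powerUsage = 0.0f;
            bool operator==(const PerformanceInfo& other) const {
                return execTime == other.execTime && powerUsage == other.powerUsage;
            }
        };
        struct OperandPerformanceTable {
            std::vector<int> entries;  // element type simplified for the sketch
            const std::vector<int>& asVector() const { return entries; }
        };
        PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
        PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
        OperandPerformanceTable operandPerformance;
    };

    int main() {
        Capabilities capabilities;
        // Mirrors the EXPECT_EQ / EXPECT_LT checks in the tests above.
        return capabilities.operandPerformance.asVector().empty() ? 0 : 1;
    }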
@@ -1107,16 +1155,16 @@ TEST_F(VersionedIDeviceV1_0Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1124,16 +1172,16 @@ TEST_F(VersionedIDeviceV1_1Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1141,16 +1189,16 @@ TEST_F(VersionedIDeviceV1_2Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1158,16 +1206,16 @@ TEST_F(VersionedIDeviceV1_3Test, getSupportedOperations) {
// setup call
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_3::ErrorStatus::NONE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify success
- EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode);
+ EXPECT_EQ(ErrorStatus::NONE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1179,7 +1227,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModel) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1195,7 +1243,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModel) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1211,7 +1259,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModel) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1229,7 +1277,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModel) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1271,13 +1319,14 @@ TEST_F(VersionedIDeviceV1_3Test, allocate) {
// setup call
const sp<MockBuffer> mockBuffer = new MockBuffer();
constexpr uint32_t mockToken = 1;
- const auto ret = [mockBuffer](const BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<BufferRole>& /*inputRoles*/,
- const hidl_vec<BufferRole>& /*outputRoles*/,
- V1_3::IDevice::allocate_cb cb) -> Return<void> {
+ const auto ret = [mockBuffer](
+ const V1_3::BufferDesc& /*desc*/,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ V1_3::IDevice::allocate_cb cb) -> hardware::Return<void> {
cb(V1_3::ErrorStatus::NONE, mockBuffer, mockToken);
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
@@ -1292,7 +1341,7 @@ TEST_F(VersionedIDeviceV1_3Test, allocate) {
TEST_F(VersionedIDeviceMockTest, wait) {
// setup call
- const auto ret = []() -> Return<void> { return {}; };
+ const auto ret = []() -> hardware::Return<void> { return {}; };
EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(Invoke(ret));
// run test
@@ -1308,16 +1357,16 @@ TEST_F(VersionedIDeviceV1_0Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1325,16 +1374,16 @@ TEST_F(VersionedIDeviceV1_1Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1342,16 +1391,16 @@ TEST_F(VersionedIDeviceV1_2Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1359,16 +1408,16 @@ TEST_F(VersionedIDeviceV1_3Test, getSupportedOperationsFailure) {
// setup failure
const auto ret = [](const auto& /*model*/, const auto cb) {
cb(V1_3::ErrorStatus::GENERAL_FAILURE, {});
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1380,7 +1429,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelLaunchFailure) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1396,7 +1445,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelLaunchFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1412,7 +1461,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelLaunchFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1430,7 +1479,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelLaunchFailure) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1446,7 +1495,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelReturnFailure) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1462,7 +1511,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelReturnFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1478,7 +1527,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelReturnFailure) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1496,7 +1545,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelReturnFailure) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1512,7 +1561,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelNullptrError) {
EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1528,7 +1577,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelNullptrError) {
EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1544,7 +1593,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelNullptrError) {
EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1562,7 +1611,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelNullptrError) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1572,13 +1621,13 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelNullptrError) {
TEST_F(VersionedIDeviceV1_3Test, allocateFailure) {
// setup failure
- const auto ret = [](const BufferDesc& /*desc*/,
- const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
- const hidl_vec<BufferRole>& /*inputRoles*/,
- const hidl_vec<BufferRole>& /*outputRoles*/,
- V1_3::IDevice::allocate_cb cb) -> Return<void> {
+ const auto ret = [](const V1_3::BufferDesc& /*desc*/,
+ const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+ const hardware::hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+ V1_3::IDevice::allocate_cb cb) -> hardware::Return<void> {
cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
- return Void();
+ return hardware::Void();
};
EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
@@ -1600,11 +1649,11 @@ TEST_F(VersionedIDeviceV1_0Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1615,11 +1664,11 @@ TEST_F(VersionedIDeviceV1_1Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1630,11 +1679,11 @@ TEST_F(VersionedIDeviceV1_2Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1645,11 +1694,11 @@ TEST_F(VersionedIDeviceV1_3Test, getSupportedOperationsTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const auto metaModel = MetaModel({}, /*strictSlicing=*/true);
+ const auto metaModel = MetaModel(Model{}, /*strictSlicing=*/true);
const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel);
// verify failure
- EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode);
+ EXPECT_EQ(ErrorStatus::GENERAL_FAILURE, resultCode);
EXPECT_EQ(0u, supportedOperations.size());
}
@@ -1660,7 +1709,7 @@ TEST_F(VersionedIDeviceV1_0Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1675,7 +1724,7 @@ TEST_F(VersionedIDeviceV1_1Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1690,7 +1739,7 @@ TEST_F(VersionedIDeviceV1_2Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1705,7 +1754,7 @@ TEST_F(VersionedIDeviceV1_3Test, prepareModelTransportFailure) {
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1767,7 +1816,7 @@ TEST_F(VersionedIDeviceMockTest, DISABLED_prepareModelRecoverCrash) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify success
@@ -1788,7 +1837,7 @@ TEST_F(VersionedIDeviceMockTest, prepareModelFullCrash) {
.WillOnce(testing::Return(nullptr));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1798,7 +1847,7 @@ TEST_F(VersionedIDeviceMockTest, prepareModelFullCrash) {
TEST_F(VersionedIDeviceMockTest, prepareModelAsyncCrash) {
// setup failure
- const auto ret = [this]() -> Return<V1_3::ErrorStatus> {
+ const auto ret = [this]() -> hardware::Return<V1_3::ErrorStatus> {
kMockDevice->simulateCrash();
return V1_3::ErrorStatus::NONE;
};
@@ -1807,7 +1856,7 @@ TEST_F(VersionedIDeviceMockTest, prepareModelAsyncCrash) {
.WillOnce(InvokeWithoutArgs(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1842,7 +1891,7 @@ TEST_F(VersionedIDeviceMockTest, waitRecoverCrash) {
.WillOnce(testing::Return(mockRecoveredDevice));
// setup recovered device calls
- const auto ret = []() -> Return<bool> { return true; };
+ const auto ret = []() -> hardware::Return<bool> { return true; };
EXPECT_CALL(*mockRecoveredDevice, linkToDeathRet()).Times(1).WillOnce(Invoke(ret));
// run test
@@ -1903,7 +1952,7 @@ std::shared_ptr<VersionedIPreparedModel> makeVersionedIPreparedModelSuccessfulIn
EXPECT_CALL(*mockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(testing::AnyNumber());
EXPECT_CALL(*mockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)).Times(testing::AnyNumber());
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = device.prepareModel(makeModel, {}, {}, {}, {}, {});
CHECK_EQ(ANEURALNETWORKS_NO_ERROR, resultCode);
@@ -1948,7 +1997,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathTransportFailure) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1968,7 +2017,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathDeadObject) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -1980,7 +2029,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathReturnError) {
// setup failure
EXPECT_CALL(*kMockPreparedModel, linkToDeathRet())
.Times(1)
- .WillOnce(InvokeWithoutArgs([]() -> Return<bool> { return false; }));
+ .WillOnce(InvokeWithoutArgs([]() -> hardware::Return<bool> { return false; }));
const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE,
kMockPreparedModel);
EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _))
@@ -1988,7 +2037,7 @@ TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathReturnError) {
.WillOnce(Invoke(ret));
// run test
- const ModelFactory makeModel = [] { return V1_3::Model{}; };
+ const ModelFactory makeModel = [] { return Model{}; };
const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {});
// verify failure
@@ -2030,8 +2079,8 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeAsync) {
TEST_F(VersionedIPreparedModelV1_2Test, executeAsync) {
// setup call
- const auto ret =
- makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {},
+ kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2046,8 +2095,8 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeAsync) {
TEST_F(VersionedIPreparedModelV1_3Test, executeAsync) {
// setup call
- const auto ret =
- makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {},
+ kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2092,7 +2141,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executePreferSync) {
TEST_F(VersionedIPreparedModelV1_2Test, executePreferSync) {
// setup call
- const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2107,7 +2156,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executePreferSync) {
TEST_F(VersionedIPreparedModelV1_3Test, executePreferSync) {
// setup call
- const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(ret));
@@ -2156,7 +2205,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFenced) {
TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) {
// setup call
- const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2173,8 +2222,8 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) {
TEST_F(VersionedIPreparedModelV1_3Test, executeFenced) {
// setup call
auto memory = allocateSharedMemory(4);
- hidl_handle fakeSyncFence(memory.handle());
- const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
+ hardware::hidl_handle fakeSyncFence(memory.handle());
+ const sp<V1_3::IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
const auto ret = makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, fakeSyncFence, callback);
EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
@@ -2276,7 +2325,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncLaunchFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchFailure) {
// setup failure
const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::GENERAL_FAILURE,
- V1_0::ErrorStatus::NONE, {}, kNoTiming);
+ V1_0::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2292,7 +2341,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncLaunchFailure) {
// setup failure
const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE,
- V1_3::ErrorStatus::NONE, {}, kNoTiming);
+ V1_3::ErrorStatus::NONE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2338,7 +2387,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncReturnFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncReturnFailure) {
// setup failure
const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE,
- V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2354,7 +2403,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncReturnFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncReturnFailure) {
// setup failure
const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE,
- V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2402,7 +2451,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executePreferSyncFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2418,7 +2467,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(ret));
@@ -2470,7 +2519,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFencedFailure) {
TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) {
// setup failure
const auto ret =
- makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+ makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12);
EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret));
// run test
@@ -2487,8 +2536,8 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) {
TEST_F(VersionedIPreparedModelV1_3Test, executeFencedFailure) {
// setup failure
auto memory = allocateSharedMemory(4);
- hidl_handle fakeSyncFence(memory.handle());
- const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
+ hardware::hidl_handle fakeSyncFence(memory.handle());
+ const sp<V1_3::IFencedExecutionCallback> callback = new MockFencedExecutionCallback();
const auto ret =
makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, fakeSyncFence, callback);
EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _))
@@ -2894,7 +2943,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncCrash) {
TEST_F(VersionedIPreparedModelMockTest, executeAsyncReturnCrash) {
// setup failure
- const auto ret = [this]() -> Return<V1_3::ErrorStatus> {
+ const auto ret = [this]() -> hardware::Return<V1_3::ErrorStatus> {
kMockPreparedModel->simulateCrash();
return V1_3::ErrorStatus::NONE;
};
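
The executeAsync tests above switch from kNoTiming to kNoTiming12. A minimal sketch of the versioned timing constant that name implies; the field names match V1_2::Timing, where UINT64_MAX conventionally means "no timing information available", but the exact definition in the test file is assumed rather than shown in these hunks.

    #include <cstdint>
    #include <limits>

    // Stand-in for android::hardware::neuralnetworks::V1_2::Timing.
    struct Timing {
        uint64_t timeOnDevice;
        uint64_t timeInDriver;
    };

    // "No timing" sentinel: both durations set to the maximum value.
    constexpr uint64_t kNoDuration = std::numeric_limits<uint64_t>::max();
    constexpr Timing kNoTiming12 = {.timeOnDevice = kNoDuration, .timeInDriver = kNoDuration};

    int main() {
        static_assert(kNoTiming12.timeOnDevice == kNoDuration, "sentinel must be max");
        return 0;
    }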
diff --git a/nn/runtime/test/android_fuzzing/Converter.cpp b/nn/runtime/test/android_fuzzing/Converter.cpp
index ca853aefc..c2fc354fa 100644
--- a/nn/runtime/test/android_fuzzing/Converter.cpp
+++ b/nn/runtime/test/android_fuzzing/Converter.cpp
@@ -29,39 +29,38 @@ namespace android::nn::fuzz {
namespace {
using namespace test_helper;
-using namespace android_nn_fuzz;
constexpr uint32_t kMaxSize = 65536;
-TestOperandType convert(OperandType type) {
+TestOperandType convert(android_nn_fuzz::OperandType type) {
return static_cast<TestOperandType>(type);
}
-TestOperationType convert(OperationType type) {
+TestOperationType convert(android_nn_fuzz::OperationType type) {
return static_cast<TestOperationType>(type);
}
-TestOperandLifeTime convert(OperandLifeTime lifetime) {
+TestOperandLifeTime convert(android_nn_fuzz::OperandLifeTime lifetime) {
return static_cast<TestOperandLifeTime>(lifetime);
}
-std::vector<float> convert(const Scales& scales) {
+std::vector<float> convert(const android_nn_fuzz::Scales& scales) {
const auto& repeatedScale = scales.scale();
return std::vector<float>(repeatedScale.begin(), repeatedScale.end());
}
-TestSymmPerChannelQuantParams convert(const SymmPerChannelQuantParams& params) {
+TestSymmPerChannelQuantParams convert(const android_nn_fuzz::SymmPerChannelQuantParams& params) {
std::vector<float> scales = convert(params.scales());
const uint32_t channelDim = params.channel_dim();
return {.scales = std::move(scales), .channelDim = channelDim};
}
-std::vector<uint32_t> convert(const Dimensions& dimensions) {
+std::vector<uint32_t> convert(const android_nn_fuzz::Dimensions& dimensions) {
const auto& repeatedDimension = dimensions.dimension();
return std::vector<uint32_t>(repeatedDimension.begin(), repeatedDimension.end());
}
-TestBuffer convert(size_t size, const Buffer& buffer) {
+TestBuffer convert(size_t size, const android_nn_fuzz::Buffer& buffer) {
if (size == 0) {
return TestBuffer();
}
@@ -70,7 +69,7 @@ TestBuffer convert(size_t size, const Buffer& buffer) {
return TestBuffer::createRandom(size % kMaxSize, &generator);
}
-TestOperand convert(const Operand& operand) {
+TestOperand convert(const android_nn_fuzz::Operand& operand) {
const TestOperandType type = convert(operand.type());
std::vector<uint32_t> dimensions = convert(operand.dimensions());
const float scale = operand.scale();
@@ -79,7 +78,7 @@ TestOperand convert(const Operand& operand) {
auto channelQuant = convert(operand.channel_quant());
const bool isIgnored = false;
- const auto halType = static_cast<hal::OperandType>(type);
+ const auto halType = static_cast<V1_3::OperandType>(type);
const bool willOverflow = nonExtensionOperandSizeOfDataOverflowsUInt32(halType, dimensions);
const bool makeEmpty = (lifetime == TestOperandLifeTime::NO_VALUE ||
lifetime == TestOperandLifeTime::TEMPORARY_VARIABLE || willOverflow);
@@ -97,7 +96,7 @@ TestOperand convert(const Operand& operand) {
.data = std::move(data)};
}
-std::vector<TestOperand> convert(const Operands& operands) {
+std::vector<TestOperand> convert(const android_nn_fuzz::Operands& operands) {
std::vector<TestOperand> testOperands;
testOperands.reserve(operands.operand_size());
const auto& repeatedOperand = operands.operand();
@@ -106,19 +105,19 @@ std::vector<TestOperand> convert(const Operands& operands) {
return testOperands;
}
-std::vector<uint32_t> convert(const Indexes& indexes) {
+std::vector<uint32_t> convert(const android_nn_fuzz::Indexes& indexes) {
const auto& repeatedIndex = indexes.index();
return std::vector<uint32_t>(repeatedIndex.begin(), repeatedIndex.end());
}
-TestOperation convert(const Operation& operation) {
+TestOperation convert(const android_nn_fuzz::Operation& operation) {
const TestOperationType type = convert(operation.type());
std::vector<uint32_t> inputs = convert(operation.inputs());
std::vector<uint32_t> outputs = convert(operation.outputs());
return {.type = type, .inputs = std::move(inputs), .outputs = std::move(outputs)};
}
-std::vector<TestOperation> convert(const Operations& operations) {
+std::vector<TestOperation> convert(const android_nn_fuzz::Operations& operations) {
std::vector<TestOperation> testOperations;
testOperations.reserve(operations.operation_size());
const auto& repeatedOperation = operations.operation();
@@ -142,7 +141,7 @@ void calculateNumberOfConsumers(const std::vector<TestOperation>& operations,
std::for_each(operations.begin(), operations.end(), addAllConsumers);
}
-TestModel convert(const Model& model) {
+TestModel convert(const android_nn_fuzz::Model& model) {
std::vector<TestOperand> operands = convert(model.operands());
std::vector<TestOperation> operations = convert(model.operations());
std::vector<uint32_t> inputIndexes = convert(model.input_indexes());
@@ -161,7 +160,7 @@ TestModel convert(const Model& model) {
} // anonymous namespace
-TestModel convertToTestModel(const Test& model) {
+TestModel convertToTestModel(const android_nn_fuzz::Test& model) {
return convert(model.model());
}
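
The Converter.cpp changes replace a broad using-directive with per-call-site qualification. A self-contained sketch of the ambiguity this avoids; the namespace and type names below are illustrative stand-ins, not from the patch.

    // Two unrelated Operand types, as with android_nn_fuzz::Operand (protobuf)
    // and the NN HAL Operand.
    namespace proto { struct Operand {}; }
    namespace hal { struct Operand {}; }

    // With `using namespace proto;` and `using namespace hal;` both active, an
    // unqualified `Operand` would be ambiguous and fail to compile.
    proto::Operand makeProtoOperand() { return {}; }  // explicit qualification
    hal::Operand makeHalOperand() { return {}; }      // keeps both types usable

    int main() {
        (void)makeProtoOperand();
        (void)makeHalOperand();
    }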
diff --git a/nn/runtime/test/android_fuzzing/FuzzHarness.cpp b/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
index 3d787d68f..76c34a75a 100644
--- a/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
+++ b/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
@@ -31,7 +31,7 @@ namespace {
using ::android::nn::nonExtensionOperandSizeOfDataOverflowsUInt32;
using ::android::nn::fuzz::convertToTestModel;
-using ::android::nn::hal::OperandType;
+using ::android::nn::V1_3::OperandType;
using ::test_helper::TestModel;
using ::test_helper::TestOperand;
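
FuzzHarness.cpp only retargets a using-declaration. A small sketch (with illustrative names) of why this form makes such renames cheap: a using-declaration imports exactly one name, so moving it from the old hal alias to the concrete V1_3 namespace is a one-line edit that leaves every unqualified use intact.

    namespace v13 { enum class OperandType { TENSOR_FLOAT32 }; }

    using v13::OperandType;  // imports exactly this one name

    int main() {
        OperandType type = OperandType::TENSOR_FLOAT32;  // unqualified use still works
        (void)type;
    }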
diff --git a/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp b/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp
index 2f72b9da4..783b66092 100644
--- a/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp
+++ b/nn/runtime/test/android_fuzzing/GenerateCorpus.cpp
@@ -41,8 +41,8 @@ OperationType convert(TestOperationType type) {
return static_cast<OperationType>(type);
}
-OperandLifeTime convert(TestOperandLifeTime lifetime) {
- return static_cast<OperandLifeTime>(lifetime);
+Operand::LifeTime convert(TestOperandLifeTime lifetime) {
+ return static_cast<Operand::LifeTime>(lifetime);
}
Scales convert(const std::vector<float>& scales) {
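
GenerateCorpus.cpp retargets the conversion from the free-standing OperandLifeTime enum to the nested Operand::LifeTime. A simplified sketch (enumerator values assumed) of why the static_cast stays correct: the cast goes through the underlying integer value, so it preserves meaning exactly when the two enums share a value layout.

    #include <cstdint>

    enum class TestOperandLifeTime : int32_t { TEMPORARY_VARIABLE = 0, SUBGRAPH_INPUT = 1 };

    struct Operand {
        enum class LifeTime : int32_t { TEMPORARY_VARIABLE = 0, SUBGRAPH_INPUT = 1 };
    };

    Operand::LifeTime convert(TestOperandLifeTime lifetime) {
        // Value-preserving by construction: both enums use the same numbering.
        return static_cast<Operand::LifeTime>(lifetime);
    }

    int main() {
        return convert(TestOperandLifeTime::SUBGRAPH_INPUT) == Operand::LifeTime::SUBGRAPH_INPUT
                ? 0
                : 1;
    }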
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
index c48829867..66023c1db 100644
--- a/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -20,6 +20,7 @@
#include <vector>
+#include <nnapi/Types.h>
#include "FibonacciExtension.h"
#include "HalInterfaces.h"
#include "NeuralNetworksExtensions.h"
@@ -33,10 +34,7 @@ namespace nn {
namespace sample_driver {
namespace {
-using namespace hal;
-
-const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE);
-const uint32_t kTypeWithinExtensionMask = (1 << kLowBitsType) - 1;
+const uint32_t kTypeWithinExtensionMask = (1 << kExtensionTypeBits) - 1;
namespace fibonacci_op {
@@ -48,22 +46,22 @@ constexpr uint32_t kInputN = 0;
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;
-bool getFibonacciExtensionPrefix(const Model& model, uint16_t* prefix) {
+bool getFibonacciExtensionPrefix(const V1_3::Model& model, uint16_t* prefix) {
NN_RET_CHECK_EQ(model.extensionNameToPrefix.size(), 1u); // Assumes no other extensions in use.
NN_RET_CHECK_EQ(model.extensionNameToPrefix[0].name, EXAMPLE_FIBONACCI_EXTENSION_NAME);
*prefix = model.extensionNameToPrefix[0].prefix;
return true;
}
-bool isFibonacciOperation(const Operation& operation, const Model& model) {
+bool isFibonacciOperation(const V1_3::Operation& operation, const V1_3::Model& model) {
int32_t operationType = static_cast<int32_t>(operation.type);
uint16_t prefix;
NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
- NN_RET_CHECK_EQ(operationType, (prefix << kLowBitsType) | EXAMPLE_FIBONACCI);
+ NN_RET_CHECK_EQ(operationType, (prefix << kExtensionTypeBits) | EXAMPLE_FIBONACCI);
return true;
}
-bool validate(const Operation& operation, const Model& model) {
+bool validate(const V1_3::Operation& operation, const V1_3::Model& model) {
NN_RET_CHECK(isFibonacciOperation(operation, model));
NN_RET_CHECK_EQ(operation.inputs.size(), kNumInputs);
NN_RET_CHECK_EQ(operation.outputs.size(), kNumOutputs);
@@ -71,9 +69,9 @@ bool validate(const Operation& operation, const Model& model) {
int32_t outputType = static_cast<int32_t>(model.main.operands[operation.outputs[0]].type);
uint16_t prefix;
NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix));
- NN_RET_CHECK(inputType == ((prefix << kLowBitsType) | EXAMPLE_INT64) ||
+ NN_RET_CHECK(inputType == ((prefix << kExtensionTypeBits) | EXAMPLE_INT64) ||
inputType == ANEURALNETWORKS_TENSOR_FLOAT32);
- NN_RET_CHECK(outputType == ((prefix << kLowBitsType) | EXAMPLE_TENSOR_QUANT64_ASYMM) ||
+ NN_RET_CHECK(outputType == ((prefix << kExtensionTypeBits) | EXAMPLE_TENSOR_QUANT64_ASYMM) ||
outputType == ANEURALNETWORKS_TENSOR_FLOAT32);
return true;
}
@@ -128,7 +126,7 @@ bool execute(IOperationExecutionContext* context) {
uint64_t* output = context->getOutputBuffer<uint64_t>(kOutputTensor);
Shape outputShape = context->getOutputShape(kOutputTensor);
auto outputQuant = reinterpret_cast<const ExampleQuant64AsymmParams*>(
- outputShape.extraParams.extension().data());
+ std::get<Operand::ExtensionParams>(outputShape.extraParams).data());
return compute(n, outputQuant->scale, outputQuant->zeroPoint, output);
}
}
@@ -142,14 +140,14 @@ const OperationRegistration* FibonacciOperationResolver::findOperation(
static OperationRegistration operationRegistration(operationType, fibonacci_op::kOperationName,
nullptr, fibonacci_op::prepare,
fibonacci_op::execute, {});
- uint16_t prefix = static_cast<int32_t>(operationType) >> kLowBitsType;
+ uint16_t prefix = static_cast<int32_t>(operationType) >> kExtensionTypeBits;
uint16_t typeWithinExtension = static_cast<int32_t>(operationType) & kTypeWithinExtensionMask;
// Assumes no other extensions in use.
return prefix != 0 && typeWithinExtension == EXAMPLE_FIBONACCI ? &operationRegistration
: nullptr;
}
-Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
+hardware::Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) {
cb(V1_0::ErrorStatus::NONE,
{
{
@@ -169,44 +167,44 @@ Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb c
},
},
});
- return Void();
+ return hardware::Void();
}
-Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+hardware::Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
- static const PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
- Capabilities capabilities = {
+ static const V1_0::PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
+ V1_3::Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
.ifPerformance = kPerf,
.whilePerformance = kPerf};
cb(V1_3::ErrorStatus::NONE, capabilities);
- return Void();
+ return hardware::Void();
}
-Return<void> FibonacciDriver::getSupportedOperations_1_3(const V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) {
+hardware::Return<void> FibonacciDriver::getSupportedOperations_1_3(
+ const V1_3::Model& model, getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (!validateModel(model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
const size_t count = model.main.operations.size();
std::vector<bool> supported(count);
for (size_t i = 0; i < count; ++i) {
- const Operation& operation = model.main.operations[i];
+ const V1_3::Operation& operation = model.main.operations[i];
if (fibonacci_op::isFibonacciOperation(operation, model)) {
if (!fibonacci_op::validate(operation, model)) {
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {});
- return Void();
+ return hardware::Void();
}
supported[i] = true;
}
}
cb(V1_3::ErrorStatus::NONE, supported);
- return Void();
+ return hardware::Void();
}
} // namespace sample_driver
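
The FibonacciDriver.cpp changes drop the locally derived kLowBitsType in favor of the shared kExtensionTypeBits constant. A worked sketch of the encoding both names describe; the 16-bit split matches the NNAPI extension convention, though the exact constant value here is an assumption rather than a quote from the runtime headers.

    #include <cassert>
    #include <cstdint>

    constexpr uint8_t kExtensionTypeBits = 16;  // low bits: type within the extension
    constexpr uint32_t kTypeWithinExtensionMask = (1u << kExtensionTypeBits) - 1;  // 0x0000FFFF

    int main() {
        const uint16_t prefix = 0x1234;           // assigned per extension name
        const uint16_t typeWithinExtension = 1;   // e.g. EXAMPLE_FIBONACCI

        // Encode: high bits carry the prefix, low bits the extension-local type.
        const int32_t operationType =
                (static_cast<int32_t>(prefix) << kExtensionTypeBits) | typeWithinExtension;

        // Decode: the inverse operations used by findOperation() above.
        assert((static_cast<uint32_t>(operationType) >> kExtensionTypeBits) == prefix);
        assert((operationType & kTypeWithinExtensionMask) == typeWithinExtension);
        return 0;
    }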
diff --git a/nn/runtime/test/fibonacci_extension/FibonacciDriver.h b/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
index 303edd809..7daf4d2de 100644
--- a/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
+++ b/nn/runtime/test/fibonacci_extension/FibonacciDriver.h
@@ -34,7 +34,7 @@ class FibonacciOperationResolver : public IOperationResolver {
return &instance;
}
- const OperationRegistration* findOperation(hal::OperationType operationType) const override;
+ const OperationRegistration* findOperation(OperationType operationType) const override;
private:
FibonacciOperationResolver() {}
@@ -45,10 +45,10 @@ class FibonacciOperationResolver : public IOperationResolver {
class FibonacciDriver : public SampleDriver {
public:
FibonacciDriver() : SampleDriver(kDriverName, FibonacciOperationResolver::get()) {}
- hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
- hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
- hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
- getSupportedOperations_1_3_cb cb) override;
+ hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
+ hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
static constexpr char kDriverName[] = "sample-driver-fibonacci-extension";
};
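
FibonacciDriver.h swaps the hal:: aliases for the underlying hardware:: names. A stand-in sketch of the HIDL calling convention these signatures follow (the types below are simplified mock-ups, not the real libhidl classes): results travel through the callback argument, while the transport-level return carries no payload and is produced by Void().

    #include <functional>
    #include <vector>

    namespace hardware {
    template <typename T>
    struct Return {  // simplified stand-in for ::android::hardware::Return
        T value{};
    };
    template <>
    struct Return<void> {};
    inline Return<void> Void() { return {}; }  // canonical "done, no payload"
    }  // namespace hardware

    using getSupportedOperations_cb =
            std::function<void(int /*status*/, const std::vector<bool>& /*supported*/)>;

    hardware::Return<void> getSupportedOperations(getSupportedOperations_cb cb) {
        cb(/*status=*/0, {true, false});  // the actual results go through the callback
        return hardware::Void();          // the return only signals transport success
    }

    int main() {
        getSupportedOperations([](int, const std::vector<bool>&) {});
    }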
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 9799ca00c..8ba763aad 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -44,7 +44,7 @@ using namespace test_helper;
RandomOperand::RandomOperand(const OperandSignature& operand, TestOperandType dataType,
uint32_t rank)
: type(operand.type), finalizer(operand.finalizer) {
- NN_FUZZER_LOG << "Operand: " << toString(type);
+ NN_FUZZER_LOG << "Operand: " << type;
if (operand.constructor) operand.constructor(dataType, rank, this);
}
@@ -81,7 +81,7 @@ size_t RandomOperand::getBufferSize() const {
// Construct a RandomOperation from OperationSignature.
RandomOperation::RandomOperation(const OperationSignature& operation)
: opType(operation.opType), finalizer(operation.finalizer) {
- NN_FUZZER_LOG << "Operation: " << toString(opType);
+ NN_FUZZER_LOG << "Operation: " << opType;
// Determine the data type and rank of the operation and invoke the constructor.
TestOperandType dataType = getRandomChoice(operation.supportedDataTypes);
@@ -294,14 +294,14 @@ TestModel RandomGraph::createTestModel() {
// Set model operations.
for (auto& operation : mOperations) {
- NN_FUZZER_LOG << "Operation: " << toString(operation.opType);
+ NN_FUZZER_LOG << "Operation: " << operation.opType;
TestOperation testOperation = {.type = static_cast<TestOperationType>(operation.opType)};
for (auto& op : operation.inputs) {
- NN_FUZZER_LOG << toString(*op);
+ NN_FUZZER_LOG << *op;
testOperation.inputs.push_back(op->opIndex);
}
for (auto& op : operation.outputs) {
- NN_FUZZER_LOG << toString(*op);
+ NN_FUZZER_LOG << *op;
testOperation.outputs.push_back(op->opIndex);
}
testModel.main.operations.push_back(std::move(testOperation));
diff --git a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index 1aa7fea41..8faae1271 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -119,18 +119,13 @@ class LoggerStream {
};
template <typename T>
-inline std::string toString(const T& obj) {
- return std::to_string(obj);
-}
-
-template <typename T>
inline std::string joinStr(const std::string& joint, const std::vector<T>& items) {
std::stringstream ss;
for (uint32_t i = 0; i < items.size(); i++) {
if (i == 0) {
- ss << toString(items[i]);
+ ss << items[i];
} else {
- ss << joint << toString(items[i]);
+ ss << joint << items[i];
}
}
return ss.str();
@@ -150,18 +145,15 @@ template <typename T>
inline std::string joinStr(const std::string& joint, int limit, const std::vector<T>& items) {
if (items.size() > static_cast<size_t>(limit)) {
std::vector<T> topMax(items.begin(), items.begin() + limit);
- return joinStr(joint, topMax) + ", (" + toString(items.size() - limit) + " ommited), " +
- toString(items.back());
+ std::stringstream ss;
+ ss << joinStr(joint, topMax) << ", (" << (items.size() - limit) << " omitted), "
+ << items.back();
+ return ss.str();
} else {
return joinStr(joint, items);
}
}
-static const char* kLifeTimeNames[6] = {
- "TEMPORARY_VARIABLE", "SUBGRAPH_INPUT", "SUBGRAPH_OUTPUT",
- "CONSTANT_COPY", "CONSTANT_REFERENCE", "NO_VALUE",
-};
-
static const bool kScalarDataType[]{
true, // ANEURALNETWORKS_FLOAT32
true, // ANEURALNETWORKS_INT32
@@ -198,10 +190,9 @@ static const uint32_t kSizeOfDataType[]{
1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
-template <>
-inline std::string toString<RandomVariableType>(const RandomVariableType& type) {
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableType& type) {
static const std::string typeNames[] = {"FREE", "CONST", "OP"};
- return typeNames[static_cast<int>(type)];
+ return os << typeNames[static_cast<int>(type)];
}
inline std::string alignedString(std::string str, int width) {
@@ -210,51 +201,45 @@ inline std::string alignedString(std::string str, int width) {
return str;
}
-template <>
-inline std::string toString<RandomVariableRange>(const RandomVariableRange& range) {
- return "[" + joinStr(", ", 20, range.getChoices()) + "]";
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableRange& range) {
+ return os << "[" + joinStr(", ", 20, range.getChoices()) + "]";
}
-template <>
-inline std::string toString<RandomOperandType>(const RandomOperandType& type) {
+inline std::ostream& operator<<(std::ostream& os, const RandomOperandType& type) {
static const std::string typeNames[] = {"Input", "Output", "Internal", "Parameter", "No Value"};
- return typeNames[static_cast<int>(type)];
+ return os << typeNames[static_cast<int>(type)];
}
-template <>
-inline std::string toString<RandomVariableNode>(const RandomVariableNode& var) {
- std::stringstream ss;
- ss << "var" << var->index << " = ";
+inline std::ostream& operator<<(std::ostream& os, const RandomVariableNode& var) {
+ os << "var" << var->index << " = ";
switch (var->type) {
case RandomVariableType::FREE:
- ss << "FREE " << toString(var->range);
+ os << "FREE " << var->range;
break;
case RandomVariableType::CONST:
- ss << "CONST " << toString(var->value);
+ os << "CONST " << var->value;
break;
case RandomVariableType::OP:
- ss << "var" << var->parent1->index << " " << var->op->getName();
- if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
- ss << ", " << toString(var->range);
+ os << "var" << var->parent1->index << " " << var->op->getName();
+ if (var->parent2 != nullptr) os << " var" << var->parent2->index;
+ os << ", " << var->range;
break;
default:
NN_FUZZER_CHECK(false);
}
- ss << ", timestamp = " << var->timestamp;
- return ss.str();
+ os << ", timestamp = " << var->timestamp;
+ return os;
}
-template <>
-inline std::string toString<RandomVariable>(const RandomVariable& var) {
- return "var" + std::to_string(var.get()->index);
+inline std::ostream& operator<<(std::ostream& os, const RandomVariable& var) {
+ return os << "var" + std::to_string(var.get()->index);
}
-template <>
-inline std::string toString<RandomOperand>(const RandomOperand& op) {
- return toString(op.type) + ", dimension = [" +
- joinStr(", ", op.dimensions,
- [](const RandomVariable& var) { return std::to_string(var.getValue()); }) +
- "], scale = " + toString(op.scale) + " , zero_point = " + toString(op.zeroPoint);
+inline std::ostream& operator<<(std::ostream& os, const RandomOperand& op) {
+ return os << op.type << ", dimension = ["
+ << joinStr(", ", op.dimensions,
+ [](const RandomVariable& var) { return std::to_string(var.getValue()); })
+ << "], scale = " << op.scale << " , zero_point = " << op.zeroPoint;
}
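
The pattern throughout this hunk is the same: each toString<T> specialization becomes an operator<< overload, so values compose directly with other << chains in NN_FUZZER_LOG. A minimal standalone sketch of the idiom, using a hypothetical enum rather than a type from this codebase:

    #include <iostream>
    #include <ostream>

    enum class Color { RED, GREEN, BLUE };  // hypothetical example type

    inline std::ostream& operator<<(std::ostream& os, const Color& c) {
        static const char* names[] = {"RED", "GREEN", "BLUE"};
        return os << names[static_cast<int>(c)];
    }

    int main() {
        std::cout << "color = " << Color::GREEN << "\n";  // prints "color = GREEN"
    }
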
// This class is a workaround for two issues our code relies on:
diff --git a/nn/runtime/test/fuzzing/RandomVariable.cpp b/nn/runtime/test/fuzzing/RandomVariable.cpp
index d3f6ef7e2..f1067e184 100644
--- a/nn/runtime/test/fuzzing/RandomVariable.cpp
+++ b/nn/runtime/test/fuzzing/RandomVariable.cpp
@@ -1,1225 +1,1225 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "RandomVariable.h"
-
-#include <algorithm>
-#include <memory>
-#include <set>
-#include <string>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "RandomGraphGeneratorUtils.h"
-
-namespace android {
-namespace nn {
-namespace fuzzing_test {
-
-unsigned int RandomVariableBase::globalIndex = 0;
-int RandomVariable::defaultValue = 10;
-
-RandomVariableBase::RandomVariableBase(int value)
- : index(globalIndex++),
- type(RandomVariableType::CONST),
- range(value),
- value(value),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(int lower, int upper)
- : index(globalIndex++),
- type(RandomVariableType::FREE),
- range(lower, upper),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(const std::vector<int>& choices)
- : index(globalIndex++),
- type(RandomVariableType::FREE),
- range(choices),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs,
- const std::shared_ptr<const IRandomVariableOp>& op)
- : index(globalIndex++),
- type(RandomVariableType::OP),
- range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)),
- op(op),
- parent1(lhs),
- parent2(rhs),
- timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
-
-void RandomVariableRange::setRange(int lower, int upper) {
- // kInvalidValue indicates an unlimited bound.
- auto head = lower == kInvalidValue ? mChoices.begin()
- : std::lower_bound(mChoices.begin(), mChoices.end(), lower);
- auto tail = upper == kInvalidValue ? mChoices.end()
- : std::upper_bound(mChoices.begin(), mChoices.end(), upper);
- NN_FUZZER_CHECK(head <= tail) << "Invalid range!";
- if (head != mChoices.begin() || tail != mChoices.end()) {
- mChoices = std::vector<int>(head, tail);
- }
-}
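
Because mChoices is kept sorted, trimming to [lower, upper] is just a lower_bound/upper_bound pair. A standalone sketch of the same logic (the kInvalidValue sentinel value here is an assumption for illustration; the real constant is defined elsewhere in this directory):

    #include <algorithm>
    #include <vector>

    constexpr int kInvalidValue = -1;  // assumed sentinel; marks an open bound

    std::vector<int> trim(const std::vector<int>& sorted, int lower, int upper) {
        auto head = lower == kInvalidValue
                            ? sorted.begin()
                            : std::lower_bound(sorted.begin(), sorted.end(), lower);
        auto tail = upper == kInvalidValue
                            ? sorted.end()
                            : std::upper_bound(sorted.begin(), sorted.end(), upper);
        return std::vector<int>(head, tail);
    }
    // trim({1, 3, 5, 7, 9}, 3, 7)             -> {3, 5, 7}
    // trim({1, 3, 5, 7, 9}, 5, kInvalidValue) -> {5, 7, 9}
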
-
-int RandomVariableRange::toConst() {
- if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)};
- return mChoices[0];
-}
-
-RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) {
- std::vector<int> result(lhs.size() + rhs.size());
- auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(),
- rhs.mChoices.end(), result.begin());
- result.resize(it - result.begin());
- return RandomVariableRange(std::move(result));
-}
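
Both choice lists are sorted, so the intersection runs in linear time; with illustrative values (assuming <algorithm> and <vector> are included):

    std::vector<int> a = {2, 4, 6}, b = {3, 4, 5, 6}, out(a.size() + b.size());
    auto it = std::set_intersection(a.begin(), a.end(), b.begin(), b.end(), out.begin());
    out.resize(it - out.begin());  // out == {4, 6}
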
-
-void RandomVariableBase::freeze() {
- if (type == RandomVariableType::CONST) return;
- value = range.toConst();
- type = RandomVariableType::CONST;
-}
-
-int RandomVariableBase::getValue() const {
- switch (type) {
- case RandomVariableType::CONST:
- return value;
- case RandomVariableType::OP:
- return op->eval(parent1->getValue(), parent2 == nullptr ? 0 : parent2->getValue());
- default:
- NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index;
- return 0;
- }
-}
-
-void RandomVariableBase::updateTimestamp() {
- timestamp = RandomVariableNetwork::get()->getGlobalTime();
- NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp;
-}
-
-RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(const std::vector<int>& choices)
- : mVar(new RandomVariableBase(choices)) {
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(RandomVariableType type)
- : mVar(new RandomVariableBase(1, defaultValue)) {
- NN_FUZZER_CHECK(type == RandomVariableType::FREE);
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
- RandomVariableNetwork::get()->add(mVar);
-}
-RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs,
- const std::shared_ptr<const IRandomVariableOp>& op)
- : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) {
- // Make a copy if the parent is CONST. This will resolve the fake dependency problem.
- if (mVar->parent1->type == RandomVariableType::CONST) {
- mVar->parent1 = RandomVariable(mVar->parent1->value).get();
- }
- if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) {
- mVar->parent2 = RandomVariable(mVar->parent2->value).get();
- }
- mVar->parent1->children.push_back(mVar);
- if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar);
- RandomVariableNetwork::get()->add(mVar);
- NN_FUZZER_LOG << "New RandomVariable " << toString(mVar);
-}
-
-void RandomVariable::setRange(int lower, int upper) {
- NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr";
- NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index;
- size_t oldSize = mVar->range.size();
- mVar->range.setRange(lower, upper);
- // Only update the timestamp if the range is *indeed* narrowed down.
- if (mVar->range.size() != oldSize) mVar->updateTimestamp();
-}
-
-RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const {
- std::set<int> st;
- for (auto i : lhs.getChoices()) {
- for (auto j : rhs.getChoices()) {
- int res = this->eval(i, j);
- if (res > kMaxValue || res < -kMaxValue) continue;
- st.insert(res);
- }
- }
- return RandomVariableRange(st);
-}
-
-// Check if the range contains exactly all values in [min, max].
-static inline bool isContinuous(const std::set<int>* range) {
- return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size());
-}
-
-// Fill the set with a range of values specified by [lower, upper].
-static inline void fillRange(std::set<int>* range, int lower, int upper) {
- for (int i = lower; i <= upper; i++) range->insert(i);
-}
-
-// The slowest algorithm: iterate through every combination of parents and save the valid pairs.
-void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const {
- // Avoid the binary search if the child is a closed range.
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- int res = this->eval(i, j);
- // Avoid the binary search if obviously out of range.
- if (res > child.second || res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- parent2Out->insert(j);
- childOut->insert(res);
- valid = true;
- }
- }
- if (valid) parent1Out->insert(i);
- }
-}
-
-// A helper template to make a class into a Singleton.
-template <class T>
-class Singleton : public T {
- public:
- static const std::shared_ptr<const T>& get() {
- static std::shared_ptr<const T> instance(new T);
- return instance;
- }
-};
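
Each op class below is stateless, so a single shared instance serves every RandomVariable node; the arithmetic operators later in this file fetch them through this helper. A usage sketch:

    // Every node built by operator+ shares one immutable Addition instance.
    const std::shared_ptr<const Addition>& add = Singleton<Addition>::get();
    // Repeated calls to Singleton<Addition>::get() return the same pointer.
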
-
-// A set of operations that only compute on a single input value.
-class IUnaryOp : public IRandomVariableOp {
- public:
- using IRandomVariableOp::eval;
- virtual int eval(int val) const = 0;
- virtual int eval(int lhs, int) const override { return eval(lhs); }
- // The slowest algorithm: iterate through every value of the parent and save the valid one.
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- NN_FUZZER_CHECK(parent2In == nullptr);
- NN_FUZZER_CHECK(parent2Out == nullptr);
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- int res = this->eval(i);
- if (res > child.second || res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- parent1Out->insert(i);
- childOut->insert(res);
- }
- }
- }
-};
-
-// A set of operations that only check conditional constraints.
-class IConstraintOp : public IRandomVariableOp {
- public:
- using IRandomVariableOp::eval;
- virtual bool check(int lhs, int rhs) const = 0;
- virtual int eval(int lhs, int rhs) const override {
- return check(lhs, rhs) ? 0 : kInvalidValue;
- }
- // The range for a constraint op is always {0}.
- virtual RandomVariableRange getInitRange(const RandomVariableRange&,
- const RandomVariableRange&) const override {
- return RandomVariableRange(0);
- }
- // The slowest algorithm:
- // iterate through every combination of parents and save the valid pairs.
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out,
- std::set<int>* childOut) const override {
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- if (this->check(i, j)) {
- parent2Out->insert(j);
- valid = true;
- }
- }
- if (valid) parent1Out->insert(i);
- }
- if (!parent1Out->empty()) childOut->insert(0);
- }
-};
-
-class Addition : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs + rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // For parents and child with closed (continuous) ranges, the output range can be
- // computed directly without iteration.
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
- std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
-
- // From ranges for parent, evalute range for child.
- // [a, b] + [c, d] -> [a + c, b + d]
- fillRange(childOut, std::max(child.first, parent1.first + parent2.first),
- std::min(child.second, parent1.second + parent2.second));
-
- // From ranges for child and one parent, evalute range for another parent.
- // [a, b] - [c, d] -> [a - d, b - c]
- fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second),
- std::min(parent1.second, child.second - parent2.first));
- fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second),
- std::min(parent2.second, child.second - parent1.first));
- }
- }
- virtual const char* getName() const override { return "ADD"; }
-};
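
A worked example of the closed-range fast path above, with illustrative bounds parent1 = [1, 5], parent2 = [2, 6], child = [0, 8]:

    // child:   [max(0, 1 + 2), min(8, 5 + 6)] = [3, 8]
    // parent1: [max(1, 0 - 6), min(5, 8 - 2)] = [1, 5]  (unchanged)
    // parent2: [max(2, 0 - 5), min(6, 8 - 1)] = [2, 6]  (unchanged)
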
-
-class Subtraction : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs - rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // Similar algorithm as Addition.
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
- std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- fillRange(childOut, std::max(child.first, parent1.first - parent2.second),
- std::min(child.second, parent1.second - parent2.first));
- fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first),
- std::min(parent1.second, child.second + parent2.second));
- fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second),
- std::min(parent2.second, parent1.second - child.first));
- }
- }
- virtual const char* getName() const override { return "SUB"; }
-};
-
-class Multiplication : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return lhs * rhs; }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- if (lhs.min() < 0 || rhs.min() < 0) {
- return IRandomVariableOp::getInitRange(lhs, rhs);
- } else {
- int lower = std::min(lhs.min() * rhs.min(), kMaxValue);
- int upper = std::min(lhs.max() * rhs.max(), kMaxValue);
- return RandomVariableRange(lower, upper);
- }
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- bool isChildInContinuous = isContinuous(childIn);
- std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
- for (auto i : *parent1In) {
- bool valid = false;
- for (auto j : *parent2In) {
- int res = this->eval(i, j);
- // With i fixed, MUL increases monotonically in j, so break the inner loop once
- // the result exceeds the upper limit.
- if (res > child.second) break;
- if (res < child.first) continue;
- if (isChildInContinuous || childIn->find(res) != childIn->end()) {
- valid = true;
- parent2Out->insert(j);
- childOut->insert(res);
- }
- }
- if (valid) parent1Out->insert(i);
- }
- }
- }
- virtual const char* getName() const override { return "MUL"; }
-};
-
-class Division : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return rhs == 0 ? kInvalidValue : lhs / rhs;
- }
- virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
- const RandomVariableRange& rhs) const override {
- if (lhs.min() < 0 || rhs.min() <= 0) {
- return IRandomVariableOp::getInitRange(lhs, rhs);
- } else {
- return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min());
- }
- }
- virtual const char* getName() const override { return "DIV"; }
-};
-
-class ExactDivision : public Division {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs;
- }
- virtual const char* getName() const override { return "EXACT_DIV"; }
-};
-
-class Modulo : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override {
- return rhs == 0 ? kInvalidValue : lhs % rhs;
- }
- virtual RandomVariableRange getInitRange(const RandomVariableRange&,
- const RandomVariableRange& rhs) const override {
- return RandomVariableRange(0, rhs.max());
- }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- if (*childIn->begin() != 0 || childIn->size() != 1u) {
- IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
- childOut);
- } else {
- // For the special case where the child is a constant 0, it is faster to evaluate
- // the ranges of the parents separately.
-
- // Evaluate parent1 directly.
- for (auto i : *parent1In) {
- for (auto j : *parent2In) {
- if (i % j == 0) {
- parent1Out->insert(i);
- break;
- }
- }
- }
- // Evaluate parent2: see if a multiple of the parent2 value can be found in parent1.
- int parent1Max = *parent1In->rbegin();
- for (auto i : *parent2In) {
- int jMax = parent1Max / i;
- for (int j = 1; j <= jMax; j++) {
- if (parent1In->find(i * j) != parent1In->end()) {
- parent2Out->insert(i);
- break;
- }
- }
- }
- if (!parent1Out->empty()) childOut->insert(0);
- }
- }
- virtual const char* getName() const override { return "MOD"; }
-};
-
-class Maximum : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); }
- virtual const char* getName() const override { return "MAX"; }
-};
-
-class Minimum : public IRandomVariableOp {
- public:
- virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); }
- virtual const char* getName() const override { return "MIN"; }
-};
-
-class Square : public IUnaryOp {
- public:
- virtual int eval(int val) const override { return val * val; }
- virtual const char* getName() const override { return "SQUARE"; }
-};
-
-class UnaryEqual : public IUnaryOp {
- public:
- virtual int eval(int val) const override { return val; }
- virtual const char* getName() const override { return "UNARY_EQUAL"; }
-};
-
-class Equal : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs == rhs; }
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
- const std::set<int>* childIn, std::set<int>* parent1Out,
- std::set<int>* parent2Out, std::set<int>* childOut) const override {
- NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0);
- // The intersection of two sets can be found in O(n).
- std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(),
- parent2In->end(), std::inserter(*parent1Out, parent1Out->begin()));
- *parent2Out = *parent1Out;
- childOut->insert(0);
- }
- virtual const char* getName() const override { return "EQUAL"; }
-};
-
-class GreaterThan : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs > rhs; }
- virtual const char* getName() const override { return "GREATER_THAN"; }
-};
-
-class GreaterEqual : public IConstraintOp {
- public:
- virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; }
- virtual const char* getName() const override { return "GREATER_EQUAL"; }
-};
-
-class FloatMultiplication : public IUnaryOp {
- public:
- FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {}
- virtual int eval(int val) const override {
- return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand));
- }
- virtual const char* getName() const override { return "MUL_FLOAT"; }
-
- private:
- float mMultiplicand;
-};
-
-// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes.
-// Since there must be at most one edge between two RandomVariableNodes, we have to do something
-// special when both sides refer to the same node.
-
-RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get())
- : RandomVariable(lhs, rhs, Singleton<Addition>::get());
-}
-RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(0)
- : RandomVariable(lhs, rhs, Singleton<Subtraction>::get());
-}
-RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(lhs, RandomVariable(), Singleton<Square>::get())
- : RandomVariable(lhs, rhs, Singleton<Multiplication>::get());
-}
-RandomVariable operator*(const RandomVariable& lhs, const float& rhs) {
- return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs));
-}
-RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(1)
- : RandomVariable(lhs, rhs, Singleton<Division>::get());
-}
-RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? RandomVariable(0)
- : RandomVariable(lhs, rhs, Singleton<Modulo>::get());
-}
-RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get());
-}
-RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) {
- return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get());
-}
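
To see what the same-node guards do, a usage sketch against the classes above (illustrative only):

    RandomVariable v(1, 5);
    auto a = v + v;      // lowered to v * 2 via Multiplication, not Addition
    auto b = v - v;      // constant 0
    auto c = v * v;      // Square(v)
    auto d = v / v;      // constant 1
    auto e = v % v;      // constant 0
    auto f = max(v, v);  // just v, no new node
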
-
-RandomVariable RandomVariable::exactDiv(const RandomVariable& other) {
- return mVar == other.get() ? RandomVariable(1)
- : RandomVariable(*this, other, Singleton<ExactDivision>::get());
-}
-
-RandomVariable RandomVariable::setEqual(const RandomVariable& other) const {
- RandomVariableNode node1 = mVar, node2 = other.get();
- NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index;
-
- // Do not setEqual on the same pair twice.
- if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) ||
- (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) {
- NN_FUZZER_LOG << "Already equal. Return.";
- return RandomVariable();
- }
-
- // If possible, always try UnaryEqual first to reduce the search space.
- // UnaryEqual can be used if node B is FREE and is evaluated later than node A.
- // TODO: Reduce code duplication.
- if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) {
- NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index;
- node2->type = RandomVariableType::OP;
- node2->parent1 = node1;
- node2->op = Singleton<UnaryEqual>::get();
- node1->children.push_back(node2);
- RandomVariableNetwork::get()->join(node1, node2);
- node1->updateTimestamp();
- return other;
- }
- if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) {
- NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index;
- node1->type = RandomVariableType::OP;
- node1->parent1 = node2;
- node1->op = Singleton<UnaryEqual>::get();
- node2->children.push_back(node1);
- RandomVariableNetwork::get()->join(node2, node1);
- node1->updateTimestamp();
- return *this;
- }
- return RandomVariable(*this, other, Singleton<Equal>::get());
-}
-
-RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const {
- NN_FUZZER_CHECK(mVar != other.get());
- return RandomVariable(*this, other, Singleton<GreaterThan>::get());
-}
-RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const {
- return mVar == other.get() ? *this
- : RandomVariable(*this, other, Singleton<GreaterEqual>::get());
-}
-
-void DisjointNetwork::add(const RandomVariableNode& var) {
- // Find the subnet index of the parents and decide the index for var.
- int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1];
- int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2];
- int ind = join(ind1, ind2);
- // If no parent, put it into a new subnet component.
- if (ind == -1) ind = mNextIndex++;
- NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind;
- mIndexMap[var] = ind;
- mEvalOrderMap[ind].push_back(var);
-}
-
-int DisjointNetwork::join(int ind1, int ind2) {
- if (ind1 == -1) return ind2;
- if (ind2 == -1) return ind1;
- if (ind1 == ind2) return ind1;
- NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2;
- auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2];
- // Append every node in ind2 to the end of ind1.
- for (const auto& var : order2) {
- order1.push_back(var);
- mIndexMap[var] = ind1;
- }
- // Remove ind2 from mEvalOrderMap.
- mEvalOrderMap.erase(mEvalOrderMap.find(ind2));
- return ind1;
-}
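
In effect this is a union-find over subnets that also maintains a per-subnet evaluation order. A small trace (illustrative):

    // Before: subnet #0 = [v1, v2], subnet #1 = [v3]
    // add(v4) where v4's parents are v2 (in #0) and v3 (in #1):
    //   join(0, 1)  -> subnet #0 = [v1, v2, v3]; subnet #1 erased
    //   then v4     -> subnet #0 = [v1, v2, v3, v4]
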
-
-RandomVariableNetwork* RandomVariableNetwork::get() {
- static RandomVariableNetwork instance;
- return &instance;
-}
-
-void RandomVariableNetwork::initialize(int defaultValue) {
- RandomVariableBase::globalIndex = 0;
- RandomVariable::defaultValue = defaultValue;
- mIndexMap.clear();
- mEvalOrderMap.clear();
- mDimProd.clear();
- mNextIndex = 0;
- mGlobalTime = 0;
- mTimestamp = -1;
-}
-
-bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1,
- const RandomVariableNode& node2) {
- if (node2->type != RandomVariableType::FREE) return false;
- int ind1 = mIndexMap[node1];
- // node2 is of a different subnet.
- if (ind1 != mIndexMap[node2]) return true;
- for (const auto& node : mEvalOrderMap[ind1]) {
- if (node == node2) return false;
- // node2 is of the same subnet but evaluated later than node1.
- if (node == node1) return true;
- }
- NN_FUZZER_CHECK(false) << "Code executed in non-reachable region.";
- return false;
-}
-
-struct EvalInfo {
- // The RandomVariableNode that this EvalInfo is associated with.
- // var->value is the current value during evaluation.
- RandomVariableNode var;
-
- // The RandomVariable value is staged when a valid combination is found.
- std::set<int> staging;
-
- // The staging values are committed after a subnet evaluation.
- std::set<int> committed;
-
- // Keeps track of the latest timestamp that committed is updated.
- int timestamp;
-
- // For evalSubnetWithLocalNetwork.
- RandomVariableType originalType;
-
- // Should only invoke eval on OP RandomVariable.
- bool eval() {
- NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
- var->value = var->op->eval(var->parent1->value,
- var->parent2 == nullptr ? 0 : var->parent2->value);
- if (var->value == kInvalidValue) return false;
- return committed.find(var->value) != committed.end();
- }
- void stage() { staging.insert(var->value); }
- void commit() {
- // Only update committed and timestamp if the range is *indeed* changed.
- if (staging.size() != committed.size()) {
- committed = std::move(staging);
- timestamp = RandomVariableNetwork::get()->getGlobalTime();
- }
- staging.clear();
- }
- void updateRange() {
- // Only update range and timestamp if the range is *indeed* changed.
- if (committed.size() != var->range.size()) {
- var->range = RandomVariableRange(committed);
- var->timestamp = timestamp;
- }
- committed.clear();
- }
-
- EvalInfo(const RandomVariableNode& var)
- : var(var),
- committed(var->range.getChoices().begin(), var->range.getChoices().end()),
- timestamp(var->timestamp) {}
-};
-using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>;
-
-// For logging only.
-inline std::string toString(const RandomVariableNode& var, EvalContext* context) {
- std::stringstream ss;
- ss << "var" << var->index << " = ";
- const auto& committed = context->at(var).committed;
- switch (var->type) {
- case RandomVariableType::FREE:
- ss << "FREE ["
- << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]";
- break;
- case RandomVariableType::CONST:
- ss << "CONST " << toString(var->value);
- break;
- case RandomVariableType::OP:
- ss << "var" << var->parent1->index << " " << var->op->getName();
- if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
- ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end()))
- << "]";
- break;
- default:
- NN_FUZZER_CHECK(false);
- }
- ss << ", timestamp = " << context->at(var).timestamp;
- return ss.str();
-}
-
-// Check if the subnet needs to be re-evaluated by comparing the timestamps.
-static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime,
- EvalContext* context = nullptr) {
- for (const auto& var : evalOrder) {
- int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp;
- // If we find a node that has been modified since last evaluation, the subnet needs to be
- // re-evaluated.
- if (timestamp > subnetTime) return true;
- }
- return false;
-}
-
-// Helper function to evaluate the subnet recursively.
-// Iterate through all combinations of FREE RandomVariable choices.
-static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) {
- if (i == evalOrder.size()) {
- // Reached the end of the evaluation order; this combination is valid, so stage it.
- for (auto& var : evalOrder) context->at(var).stage();
- return;
- }
- const auto& var = evalOrder[i];
- if (var->type == RandomVariableType::FREE) {
- // For FREE RandomVariable, iterate through all valid choices.
- for (int val : context->at(var).committed) {
- var->value = val;
- evalSubnetHelper(evalOrder, context, i + 1);
- }
- return;
- } else if (var->type == RandomVariableType::OP) {
- // For OP RandomVariable, evaluate from parents and terminate if the result is invalid.
- if (!context->at(var).eval()) return;
- }
- evalSubnetHelper(evalOrder, context, i + 1);
-}
-
-// Check if the subnet has exactly one OP RandomVariable.
-static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) {
- int numOp = 0;
- for (const auto& var : evalOrder) {
- if (var->type == RandomVariableType::OP) numOp++;
- if (numOp > 1) return false;
- }
- return numOp != 0;
-}
-
-// Evaluate with a potentially faster approach provided by IRandomVariableOp.
-static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder,
- EvalContext* context) {
- NN_FUZZER_LOG << "Identified as single op subnet";
- const auto& var = evalOrder.back();
- NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
- var->op->eval(&context->at(var->parent1).committed,
- var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed,
- &context->at(var).committed, &context->at(var->parent1).staging,
- var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging,
- &context->at(var).staging);
-}
-
-// Check if the number of combinations of FREE RandomVariables exceeds the limit.
-static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder,
- EvalContext* context = nullptr) {
- constexpr uint64_t kLimit = 1e8;
- uint64_t numCombinations = 1;
- for (const auto& var : evalOrder) {
- if (var->type == RandomVariableType::FREE) {
- size_t size =
- context == nullptr ? var->range.size() : context->at(var).committed.size();
- numCombinations *= size;
- // To prevent overflow.
- if (numCombinations > kLimit) return kLimit;
- }
- }
- return numCombinations;
-}
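
Concretely (illustrative sizes):

    // Two FREE variables with 100 and 200 choices:
    //   numCombinations = 100 * 200 = 20,000
    //   <= kMaxNumCombinationsWithLocalNetwork (1e5), so evalRange() below
    //   would dispatch this subnet to evalSubnetWithLocalNetwork
    // Counts saturate at kLimit = 1e8 to prevent uint64_t overflow.
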
-
-// Evaluate the subnet recursively. Returns false if the number of combinations of FREE
-// RandomVariable choices exceeds the threshold kMaxNumCombinations.
-static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) {
- constexpr uint64_t kMaxNumCombinations = 1e7;
- NN_FUZZER_LOG << "Evaluate with brute force";
- if (isSingleOpSubnet(evalOrder)) {
- // If the subnet has only a single OP, dispatch to a faster evaluation.
- evalSubnetSingleOpHelper(evalOrder, context);
- } else {
- if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) {
- NN_FUZZER_LOG << "Terminate the evaluation because of large search range";
- std::cout << "[ ] Terminate the evaluation because of large search range"
- << std::endl;
- return false;
- }
- evalSubnetHelper(evalOrder, context);
- }
- for (auto& var : evalOrder) {
- if (context->at(var).staging.empty()) {
- NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context);
- return false;
- }
- context->at(var).commit();
- }
- return true;
-}
-
-struct LocalNetwork {
- EvaluationOrder evalOrder;
- std::vector<RandomVariableNode> bridgeNodes;
- int timestamp = 0;
-
- bool eval(EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp;
- // Temporarily treat bridge nodes as FREE RandomVariables.
- for (const auto& var : bridgeNodes) {
- context->at(var).originalType = var->type;
- var->type = RandomVariableType::FREE;
- }
- for (const auto& var : evalOrder) {
- context->at(var).staging.clear();
- NN_FUZZER_LOG << " - " << toString(var, context);
- }
- bool success = evalSubnetWithBruteForce(evalOrder, context);
- // Reset the RandomVariable types for bridge nodes.
- for (const auto& var : bridgeNodes) var->type = context->at(var).originalType;
- return success;
- }
-};
-
-// Partition the network further into LocalNetworks based on the result of the bridge
-// annotation algorithm.
-class GraphPartitioner : public DisjointNetwork {
- public:
- GraphPartitioner() = default;
-
- std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) {
- annotateBridge(evalOrder);
- for (const auto& var : evalOrder) add(var);
- return get(timestamp);
- }
-
- private:
- GraphPartitioner(const GraphPartitioner&) = delete;
- GraphPartitioner& operator=(const GraphPartitioner&) = delete;
-
- // Find the parent-child relationship between var1 and var2, and set the bridge flag.
- void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) {
- if (var1->parent1 == var2) {
- mBridgeInfo[var1].isParent1Bridge = true;
- } else if (var1->parent2 == var2) {
- mBridgeInfo[var1].isParent2Bridge = true;
- } else {
- setBridgeFlag(var2, var1);
- }
- }
-
- // Annotate the bridges with DFS -- an edge [u, v] is a bridge if no ancestor of u is
- // reachable from a node in the subtree of v. The complexity is O(V + E).
- // discoveryTime: The timestamp at which a node is first visited.
- // lowTime: The minimum discovery time among all nodes reachable from the subtree of the node.
- void annotateBridgeHelper(const RandomVariableNode& var, int* time) {
- mBridgeInfo[var].visited = true;
- mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++;
-
- // The algorithm operates on an undirected graph. First find all adjacent nodes.
- auto adj = var->children;
- if (var->parent1 != nullptr) adj.push_back(var->parent1);
- if (var->parent2 != nullptr) adj.push_back(var->parent2);
-
- for (const auto& weakChild : adj) {
- auto child = weakChild.lock();
- NN_FUZZER_CHECK(child != nullptr);
- if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue;
- if (!mBridgeInfo[child].visited) {
- mBridgeInfo[child].parent = var;
- annotateBridgeHelper(child, time);
-
- // If no node in the subtree of child is connected to any ancestor of var,
- // then the edge [var, child] is a bridge.
- mBridgeInfo[var].lowTime =
- std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime);
- if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime)
- setBridgeFlag(var, child);
- } else if (mBridgeInfo[var].parent != child) {
- mBridgeInfo[var].lowTime =
- std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime);
- }
- }
- }
-
- // Find all bridges in the subnet with DFS.
- void annotateBridge(const EvaluationOrder& evalOrder) {
- for (const auto& var : evalOrder) mBridgeInfo[var];
- int time = 0;
- for (const auto& var : evalOrder) {
- if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time);
- }
- }
-
- // Re-partition the network by treating bridge edges as removed.
- void add(const RandomVariableNode& var) {
- auto parent1 = var->parent1;
- auto parent2 = var->parent2;
- if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr;
- if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr;
- DisjointNetwork::add(var);
- var->parent1 = parent1;
- var->parent2 = parent2;
- }
-
- // Add bridge nodes to the local networks and remove single-node subnets.
- std::vector<LocalNetwork> get(int timestamp) {
- std::vector<LocalNetwork> res;
- for (auto& pair : mEvalOrderMap) {
- // We do not need to evaluate a subnet with only a single node.
- if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue;
- res.emplace_back();
- for (const auto& var : pair.second) {
- if (mBridgeInfo[var].isParent1Bridge) {
- res.back().evalOrder.push_back(var->parent1);
- res.back().bridgeNodes.push_back(var->parent1);
- }
- if (mBridgeInfo[var].isParent2Bridge) {
- res.back().evalOrder.push_back(var->parent2);
- res.back().bridgeNodes.push_back(var->parent2);
- }
- res.back().evalOrder.push_back(var);
- }
- res.back().timestamp = timestamp;
- }
- return res;
- }
-
- // Bookkeeping for the bridge discovery algorithm.
- struct BridgeInfo {
- bool isParent1Bridge = false;
- bool isParent2Bridge = false;
- int discoveryTime = 0;
- int lowTime = 0;
- bool visited = false;
- std::shared_ptr<RandomVariableBase> parent = nullptr;
- };
- std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo;
-};
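
A small worked case (illustrative): suppose v3 = v1 OP v2 and the edge [v3, v2] is annotated as a bridge:

    // re-partition: v3 joins v1's subnet only; v2's single-node subnet is dropped
    // get(): LocalNetwork evalOrder = [v1, v2, v3], bridgeNodes = [v2]
    // eval(): v2 is temporarily treated as FREE, then its type is restored
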
-
-// Evaluate subnets repeatedly until they converge.
-// Class T_Subnet must have members evalOrder and timestamp, and a member function eval.
-template <class T_Subnet>
-inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) {
- bool terminate = false;
- while (!terminate) {
- terminate = true;
- for (auto& subnet : *subnets) {
- if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) {
- if (!subnet.eval(context)) return false;
- subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime();
- terminate = false;
- }
- }
- }
- return true;
-}
-
-// Evaluate the subnet by first partitioning it further into LocalNetworks.
-static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp,
- EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate with local network";
- auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp);
- return evalSubnetsRepeatedly(&localNetworks, context);
-}
-
-struct LeafNetwork {
- EvaluationOrder evalOrder;
- int timestamp = 0;
- LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) {
- std::set<RandomVariableNode> visited;
- constructorHelper(var, &visited);
- }
- // Construct the leaf network by recursively including parent nodes.
- void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) {
- if (var == nullptr || visited->find(var) != visited->end()) return;
- constructorHelper(var->parent1, visited);
- constructorHelper(var->parent2, visited);
- visited->insert(var);
- evalOrder.push_back(var);
- }
- bool eval(EvalContext* context) {
- return evalSubnetWithLocalNetwork(evalOrder, timestamp, context);
- }
-};
-
-// Evaluate the subnet by leaf networks.
-// NOTE: This algorithm only produces a correct result *most* of the time (> 99%).
-// The random graph generator is expected to retry if it fails.
-static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp,
- EvalContext* context) {
- NN_FUZZER_LOG << "Evaluate with leaf network";
- // Construct leaf networks.
- std::vector<LeafNetwork> leafNetworks;
- for (const auto& var : evalOrder) {
- if (var->children.empty()) {
- NN_FUZZER_LOG << "Found leaf " << toString(var, context);
- leafNetworks.emplace_back(var, timestamp);
- }
- }
- return evalSubnetsRepeatedly(&leafNetworks, context);
-}
-
-void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) {
- if (dims.size() <= 1) return;
- EvaluationOrder order;
- for (const auto& dim : dims) order.push_back(dim.get());
- mDimProd.push_back(order);
-}
-
-bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd,
- const std::unordered_map<RandomVariableNode, int>& indexMap,
- EvalContext* context, std::set<int>* dirtySubnets) {
- for (auto& evalOrder : mDimProd) {
- NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size();
- // Initialize EvalInfo of each RandomVariable.
- for (auto& var : evalOrder) {
- if (context->find(var) == context->end()) context->emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var, context);
- }
-
- // Enforce that the product of the dimension values stays below kMaxValue:
- // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...)
- int prod = 1;
- for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin());
- for (auto& var : evalOrder) {
- auto& committed = context->at(var).committed;
- int maxValue = kMaxValue / (prod / *committed.begin());
- auto it = committed.upper_bound(maxValue);
- // var has empty range -> no solution.
- if (it == committed.begin()) return false;
- // The range is not modified -> continue.
- if (it == committed.end()) continue;
- // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation.
- committed.erase(it, committed.end());
- context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime();
- dirtySubnets->insert(indexMap.at(var));
- }
- }
- return true;
-}
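
For intuition, with illustrative minima (kMaxValue is defined elsewhere in this directory):

    // dims {a, b, c} with committed minima 2, 3, 4  ->  prod = 2 * 3 * 4 = 24
    // ceiling for a: kMaxValue / (prod / min(a)) = kMaxValue / 12
    // committed choices of a above the ceiling are erased; if any were erased,
    // a's subnet is inserted into dirtySubnets for re-evaluation
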
-
-bool RandomVariableNetwork::evalRange() {
- constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500;
- constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5;
- NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks";
- EvalContext context;
- std::set<int> dirtySubnets; // Which subnets need evaluation.
- for (auto& pair : mEvalOrderMap) {
- const auto& evalOrder = pair.second;
- // Decide by timestamp whether the subnet needs evaluation -- if no range has changed
- // since the last evaluation, then the subnet does not need re-evaluation.
- if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue;
- dirtySubnets.insert(pair.first);
- }
- if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
-
- // Repeat until the ranges converge.
- while (!dirtySubnets.empty()) {
- for (int ind : dirtySubnets) {
- const auto& evalOrder = mEvalOrderMap[ind];
- NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size();
-
- // Initialize EvalInfo of each RandomVariable.
- for (auto& var : evalOrder) {
- if (context.find(var) == context.end()) context.emplace(var, var);
- NN_FUZZER_LOG << " - " << toString(var, &context);
- }
-
- // Dispatch to different algorithm according to search range.
- bool success;
- uint64_t numCombinations = getNumCombinations(evalOrder);
- if (numCombinations <= kMaxNumCombinationsWithBruteForce) {
- success = evalSubnetWithBruteForce(evalOrder, &context);
- } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) {
- success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context);
- } else {
- success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context);
- }
- if (!success) return false;
- }
- dirtySubnets.clear();
- if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
- }
- // On a successful evaluation, update the RandomVariables from the EvalContext.
- for (auto& pair : context) pair.second.updateRange();
- mTimestamp = getGlobalTime();
- NN_FUZZER_LOG << "Finish range evaluation";
- return true;
-}
-
-static void unsetEqual(const RandomVariableNode& node) {
- if (node == nullptr) return;
- NN_FUZZER_LOG << "Unset equality of var" << node->index;
- auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) {
- return ptr.lock() == node;
- };
- RandomVariableNode parent1 = node->parent1, parent2 = node->parent2;
- parent1->children.erase(
- std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual));
- node->parent1 = nullptr;
- if (parent2 != nullptr) {
- // For Equal.
- parent2->children.erase(
- std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual));
- node->parent2 = nullptr;
- } else {
- // For UnaryEqual.
- node->type = RandomVariableType::FREE;
- node->op = nullptr;
- }
-}
-
-// A class to revert all the changes made to RandomVariableNetwork since the Reverter object was
-// constructed. Only used when setEqualIfCompatible finds the operands incompatible.
-class RandomVariableNetwork::Reverter {
- public:
- // Take a snapshot of RandomVariableNetwork when Reverter is constructed.
- Reverter() : mSnapshot(*RandomVariableNetwork::get()) {}
- // Add constraint (Equal) nodes to the reverter.
- void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); }
- void revert() {
- NN_FUZZER_LOG << "Revert RandomVariableNetwork";
- // Release the constraints.
- for (const auto& node : mEqualNodes) unsetEqual(node);
- // Reset all member variables.
- *RandomVariableNetwork::get() = std::move(mSnapshot);
- }
-
- private:
- Reverter(const Reverter&) = delete;
- Reverter& operator=(const Reverter&) = delete;
- RandomVariableNetwork mSnapshot;
- std::vector<RandomVariableNode> mEqualNodes;
-};
-
-bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs,
- const std::vector<RandomVariable>& rhs) {
- NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {"
- << joinStr(", ", rhs) << "}";
- if (lhs.size() != rhs.size()) return false;
- Reverter reverter;
- bool result = true;
- for (size_t i = 0; i < lhs.size(); i++) {
- auto node = lhs[i].setEqual(rhs[i]).get();
- reverter.addNode(node);
- // Terminate early if there is no common choice between the two ranges.
- if (node != nullptr && node->range.empty()) result = false;
- }
- result = result && evalRange();
- if (!result) reverter.revert();
- NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]");
- return result;
-}
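
A usage sketch (hypothetical variable names): matching two shapes dimension by dimension; on failure the Reverter restores the network as if the call never happened.

    // std::vector<RandomVariable> lhs = {h1, w1}, rhs = {h2, w2};
    // if (RandomVariableNetwork::get()->setEqualIfCompatible(lhs, rhs)) {
    //     // h1 == h2 and w1 == w2 are now enforced by Equal/UnaryEqual nodes
    // }  // otherwise the network state is unchanged
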
-
-bool RandomVariableNetwork::freeze() {
- NN_FUZZER_LOG << "Freeze the random network";
- if (!evalRange()) return false;
-
- std::vector<RandomVariableNode> nodes;
- for (const auto& pair : mEvalOrderMap) {
- // Find all FREE RandomVariables in the subnet.
- for (const auto& var : pair.second) {
- if (var->type == RandomVariableType::FREE) nodes.push_back(var);
- }
- }
-
- // Randomly shuffle the order for more uniform randomness.
- randomShuffle(&nodes);
-
- // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable.
- // TODO: Might be able to optimize this.
- for (const auto& var : nodes) {
- if (var->type != RandomVariableType::FREE) continue;
- size_t size = var->range.size();
- NN_FUZZER_LOG << "Freeze " << toString(var);
- var->freeze();
- NN_FUZZER_LOG << " " << toString(var);
- // There is no need to re-evaluate if the FREE RandomVariable has only one choice.
- if (size > 1) {
- var->updateTimestamp();
- if (!evalRange()) {
- NN_FUZZER_LOG << "Freeze failed at " << toString(var);
- return false;
- }
- }
- }
- NN_FUZZER_LOG << "Finish freezing the random network";
- return true;
-}
-
-} // namespace fuzzing_test
-} // namespace nn
-} // namespace android
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RandomVariable.h"
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "RandomGraphGeneratorUtils.h"
+
+namespace android {
+namespace nn {
+namespace fuzzing_test {
+
+unsigned int RandomVariableBase::globalIndex = 0;
+int RandomVariable::defaultValue = 10;
+
+RandomVariableBase::RandomVariableBase(int value)
+ : index(globalIndex++),
+ type(RandomVariableType::CONST),
+ range(value),
+ value(value),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+RandomVariableBase::RandomVariableBase(int lower, int upper)
+ : index(globalIndex++),
+ type(RandomVariableType::FREE),
+ range(lower, upper),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+RandomVariableBase::RandomVariableBase(const std::vector<int>& choices)
+ : index(globalIndex++),
+ type(RandomVariableType::FREE),
+ range(choices),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs,
+ const std::shared_ptr<const IRandomVariableOp>& op)
+ : index(globalIndex++),
+ type(RandomVariableType::OP),
+ range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)),
+ op(op),
+ parent1(lhs),
+ parent2(rhs),
+ timestamp(RandomVariableNetwork::get()->getGlobalTime()) {}
+
+void RandomVariableRange::setRange(int lower, int upper) {
+ // kInvalidValue indicates an unlimited bound.
+ auto head = lower == kInvalidValue ? mChoices.begin()
+ : std::lower_bound(mChoices.begin(), mChoices.end(), lower);
+ auto tail = upper == kInvalidValue ? mChoices.end()
+ : std::upper_bound(mChoices.begin(), mChoices.end(), upper);
+ NN_FUZZER_CHECK(head <= tail) << "Invalid range!";
+ if (head != mChoices.begin() || tail != mChoices.end()) {
+ mChoices = std::vector<int>(head, tail);
+ }
+}
+
+int RandomVariableRange::toConst() {
+ if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)};
+ return mChoices[0];
+}
+
+RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) {
+ std::vector<int> result(lhs.size() + rhs.size());
+ auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(),
+ rhs.mChoices.end(), result.begin());
+ result.resize(it - result.begin());
+ return RandomVariableRange(std::move(result));
+}
+
+void RandomVariableBase::freeze() {
+ if (type == RandomVariableType::CONST) return;
+ value = range.toConst();
+ type = RandomVariableType::CONST;
+}
+
+int RandomVariableBase::getValue() const {
+ switch (type) {
+ case RandomVariableType::CONST:
+ return value;
+ case RandomVariableType::OP:
+ return op->eval(parent1->getValue(), parent2 == nullptr ? 0 : parent2->getValue());
+ default:
+ NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index;
+ return 0;
+ }
+}
+
+void RandomVariableBase::updateTimestamp() {
+ timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp;
+}
+
+RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) {
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) {
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(const std::vector<int>& choices)
+ : mVar(new RandomVariableBase(choices)) {
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(RandomVariableType type)
+ : mVar(new RandomVariableBase(1, defaultValue)) {
+ NN_FUZZER_CHECK(type == RandomVariableType::FREE);
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+ RandomVariableNetwork::get()->add(mVar);
+}
+RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs,
+ const std::shared_ptr<const IRandomVariableOp>& op)
+ : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) {
+ // Make a copy if the parent is CONST. This will resolve the fake dependency problem.
+ if (mVar->parent1->type == RandomVariableType::CONST) {
+ mVar->parent1 = RandomVariable(mVar->parent1->value).get();
+ }
+ if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) {
+ mVar->parent2 = RandomVariable(mVar->parent2->value).get();
+ }
+ mVar->parent1->children.push_back(mVar);
+ if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar);
+ RandomVariableNetwork::get()->add(mVar);
+ NN_FUZZER_LOG << "New RandomVariable " << mVar;
+}
+
+void RandomVariable::setRange(int lower, int upper) {
+ NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr";
+ NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index;
+ size_t oldSize = mVar->range.size();
+ mVar->range.setRange(lower, upper);
+ // Only update the timestamp if the range is *indeed* narrowed down.
+ if (mVar->range.size() != oldSize) mVar->updateTimestamp();
+}
+
+RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const {
+ std::set<int> st;
+ for (auto i : lhs.getChoices()) {
+ for (auto j : rhs.getChoices()) {
+ int res = this->eval(i, j);
+ if (res > kMaxValue || res < -kMaxValue) continue;
+ st.insert(res);
+ }
+ }
+ return RandomVariableRange(st);
+}
+
+// Check if the range contains exactly all values in [min, max].
+static inline bool isContinuous(const std::set<int>* range) {
+ return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size());
+}
+
+// Fill the set with a range of values specified by [lower, upper].
+static inline void fillRange(std::set<int>* range, int lower, int upper) {
+ for (int i = lower; i <= upper; i++) range->insert(i);
+}
+
+// The slowest algorithm: iterate through every combination of parents and save the valid pairs.
+void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const {
+ // Avoid the binary search if the child is a closed range.
+ bool isChildInContinuous = isContinuous(childIn);
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ for (auto i : *parent1In) {
+ bool valid = false;
+ for (auto j : *parent2In) {
+ int res = this->eval(i, j);
+ // Avoid the binary search if obviously out of range.
+ if (res > child.second || res < child.first) continue;
+ if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+ parent2Out->insert(j);
+ childOut->insert(res);
+ valid = true;
+ }
+ }
+ if (valid) parent1Out->insert(i);
+ }
+}
+
+// A helper template to make a class into a Singleton.
+template <class T>
+class Singleton : public T {
+ public:
+ static const std::shared_ptr<const T>& get() {
+ static std::shared_ptr<const T> instance(new T);
+ return instance;
+ }
+};
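+// e.g. Singleton<Addition>::get() returns the single shared Addition instance that the operator
+// overloads further below pass to the RandomVariable OP constructor.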
+
+// A set of operations that only compute on a single input value.
+class IUnaryOp : public IRandomVariableOp {
+ public:
+ using IRandomVariableOp::eval;
+ virtual int eval(int val) const = 0;
+ virtual int eval(int lhs, int) const override { return eval(lhs); }
+    // The slowest algorithm: iterate through every value of the parent and save the valid ones.
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ NN_FUZZER_CHECK(parent2In == nullptr);
+ NN_FUZZER_CHECK(parent2Out == nullptr);
+ bool isChildInContinuous = isContinuous(childIn);
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ for (auto i : *parent1In) {
+ int res = this->eval(i);
+ if (res > child.second || res < child.first) continue;
+ if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+ parent1Out->insert(i);
+ childOut->insert(res);
+ }
+ }
+ }
+};
+
+// A set of operations that only check conditional constraints.
+class IConstraintOp : public IRandomVariableOp {
+ public:
+ using IRandomVariableOp::eval;
+ virtual bool check(int lhs, int rhs) const = 0;
+ virtual int eval(int lhs, int rhs) const override {
+ return check(lhs, rhs) ? 0 : kInvalidValue;
+ }
+    // The range for a constraint op is always {0}: eval() returns 0 when the constraint holds
+    // and kInvalidValue otherwise.
+ virtual RandomVariableRange getInitRange(const RandomVariableRange&,
+ const RandomVariableRange&) const override {
+ return RandomVariableRange(0);
+ }
+    // The slowest algorithm: iterate through every combination of parents and save the
+    // valid pairs.
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out,
+ std::set<int>* childOut) const override {
+ for (auto i : *parent1In) {
+ bool valid = false;
+ for (auto j : *parent2In) {
+ if (this->check(i, j)) {
+ parent2Out->insert(j);
+ valid = true;
+ }
+ }
+ if (valid) parent1Out->insert(i);
+ }
+ if (!parent1Out->empty()) childOut->insert(0);
+ }
+};
+
+class Addition : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return lhs + rhs; }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max());
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+            // When the parents and the child all have contiguous ranges, the output ranges
+            // can be computed directly without iteration.
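+            // e.g. parent1 = [1, 3], parent2 = [2, 5], child = [0, 6]:
+            //   childOut   = [max(0, 1 + 2), min(6, 3 + 5)] = [3, 6]
+            //   parent1Out = [max(1, 0 - 5), min(3, 6 - 2)] = [1, 3]
+            //   parent2Out = [max(2, 0 - 3), min(5, 6 - 1)] = [2, 5]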
+ std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
+ std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+
+            // From the parents' ranges, evaluate the child's range.
+ // [a, b] + [c, d] -> [a + c, b + d]
+ fillRange(childOut, std::max(child.first, parent1.first + parent2.first),
+ std::min(child.second, parent1.second + parent2.second));
+
+            // From the ranges of the child and one parent, evaluate the range of the other
+            // parent.
+ // [a, b] - [c, d] -> [a - d, b - c]
+ fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second),
+ std::min(parent1.second, child.second - parent2.first));
+ fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second),
+ std::min(parent2.second, child.second - parent1.first));
+ }
+ }
+ virtual const char* getName() const override { return "ADD"; }
+};
+
+class Subtraction : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return lhs - rhs; }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min());
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+            // The same interval-based algorithm as Addition:
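+            // with parent1 = [a, b], parent2 = [c, d], child = [e, f], the child lies in
+            // [a - d, b - c], parent1 in [e + c, f + d], and parent2 in [a - f, b - e].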
+ std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()};
+ std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()};
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ fillRange(childOut, std::max(child.first, parent1.first - parent2.second),
+ std::min(child.second, parent1.second - parent2.first));
+ fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first),
+ std::min(parent1.second, child.second + parent2.second));
+ fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second),
+ std::min(parent2.second, parent1.second - child.first));
+ }
+ }
+ virtual const char* getName() const override { return "SUB"; }
+};
+
+class Multiplication : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return lhs * rhs; }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ if (lhs.min() < 0 || rhs.min() < 0) {
+ return IRandomVariableOp::getInitRange(lhs, rhs);
+ } else {
+ int lower = std::min(lhs.min() * rhs.min(), kMaxValue);
+ int upper = std::min(lhs.max() * rhs.max(), kMaxValue);
+ return RandomVariableRange(lower, upper);
+ }
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+ bool isChildInContinuous = isContinuous(childIn);
+ std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()};
+ for (auto i : *parent1In) {
+ bool valid = false;
+ for (auto j : *parent2In) {
+ int res = this->eval(i, j);
+                    // Since MUL increases monotonically in each operand (all values are
+                    // non-negative here), break out of the inner loop once the result exceeds
+                    // the upper limit.
+ if (res > child.second) break;
+ if (res < child.first) continue;
+ if (isChildInContinuous || childIn->find(res) != childIn->end()) {
+ valid = true;
+ parent2Out->insert(j);
+ childOut->insert(res);
+ }
+ }
+ if (valid) parent1Out->insert(i);
+ }
+ }
+ }
+ virtual const char* getName() const override { return "MUL"; }
+};
+
+class Division : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override {
+ return rhs == 0 ? kInvalidValue : lhs / rhs;
+ }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs,
+ const RandomVariableRange& rhs) const override {
+ if (lhs.min() < 0 || rhs.min() <= 0) {
+ return IRandomVariableOp::getInitRange(lhs, rhs);
+ } else {
+ return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min());
+ }
+ }
+ virtual const char* getName() const override { return "DIV"; }
+};
+
+class ExactDivision : public Division {
+ public:
+ virtual int eval(int lhs, int rhs) const override {
+ return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs;
+ }
+ virtual const char* getName() const override { return "EXACT_DIV"; }
+};
+
+class Modulo : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override {
+ return rhs == 0 ? kInvalidValue : lhs % rhs;
+ }
+ virtual RandomVariableRange getInitRange(const RandomVariableRange&,
+ const RandomVariableRange& rhs) const override {
+ return RandomVariableRange(0, rhs.max());
+ }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ if (*childIn->begin() != 0 || childIn->size() != 1u) {
+ IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out,
+ childOut);
+ } else {
+            // For the special case where the child is the constant 0, it is faster to
+            // evaluate the ranges for the two parents separately.
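+            // e.g. parent1 = {4, 5, 6}, parent2 = {2, 3}: 4 and 6 have a divisor in parent2,
+            // so parent1Out = {4, 6}; both 2 and 3 divide some value in parent1, so
+            // parent2Out = {2, 3}.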
+
+ // Evaluate parent1 directly.
+ for (auto i : *parent1In) {
+ for (auto j : *parent2In) {
+ if (i % j == 0) {
+ parent1Out->insert(i);
+ break;
+ }
+ }
+ }
+            // Evaluate parent2: check whether some multiple of the parent2 value exists in
+            // parent1.
+ int parent1Max = *parent1In->rbegin();
+ for (auto i : *parent2In) {
+ int jMax = parent1Max / i;
+ for (int j = 1; j <= jMax; j++) {
+ if (parent1In->find(i * j) != parent1In->end()) {
+ parent2Out->insert(i);
+ break;
+ }
+ }
+ }
+ if (!parent1Out->empty()) childOut->insert(0);
+ }
+ }
+ virtual const char* getName() const override { return "MOD"; }
+};
+
+class Maximum : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); }
+ virtual const char* getName() const override { return "MAX"; }
+};
+
+class Minimum : public IRandomVariableOp {
+ public:
+ virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); }
+ virtual const char* getName() const override { return "MIN"; }
+};
+
+class Square : public IUnaryOp {
+ public:
+ virtual int eval(int val) const override { return val * val; }
+ virtual const char* getName() const override { return "SQUARE"; }
+};
+
+class UnaryEqual : public IUnaryOp {
+ public:
+ virtual int eval(int val) const override { return val; }
+ virtual const char* getName() const override { return "UNARY_EQUAL"; }
+};
+
+class Equal : public IConstraintOp {
+ public:
+ virtual bool check(int lhs, int rhs) const override { return lhs == rhs; }
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In,
+ const std::set<int>* childIn, std::set<int>* parent1Out,
+ std::set<int>* parent2Out, std::set<int>* childOut) const override {
+ NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0);
+ // The intersection of two sets can be found in O(n).
+ std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(),
+ parent2In->end(), std::inserter(*parent1Out, parent1Out->begin()));
+ *parent2Out = *parent1Out;
+ childOut->insert(0);
+ }
+ virtual const char* getName() const override { return "EQUAL"; }
+};
+
+class GreaterThan : public IConstraintOp {
+ public:
+ virtual bool check(int lhs, int rhs) const override { return lhs > rhs; }
+ virtual const char* getName() const override { return "GREATER_THAN"; }
+};
+
+class GreaterEqual : public IConstraintOp {
+ public:
+ virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; }
+ virtual const char* getName() const override { return "GREATER_EQUAL"; }
+};
+
+class FloatMultiplication : public IUnaryOp {
+ public:
+ FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {}
+ virtual int eval(int val) const override {
+ return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand));
+ }
+ virtual const char* getName() const override { return "MUL_FLOAT"; }
+
+ private:
+ float mMultiplicand;
+};
+
+// Arithmetic operators and methods on RandomVariables create OP RandomVariableNodes.
+// Since there may be at most one edge between two RandomVariableNodes, we have to handle the
+// case where both sides refer to the same node specially.
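+// For example, with the same node v on both sides: v + v is rewritten as v * 2, v - v as the
+// constant 0, v * v as SQUARE(v), v / v as the constant 1, and v % v as the constant 0.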
+
+RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get())
+ : RandomVariable(lhs, rhs, Singleton<Addition>::get());
+}
+RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(0)
+ : RandomVariable(lhs, rhs, Singleton<Subtraction>::get());
+}
+RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(lhs, RandomVariable(), Singleton<Square>::get())
+ : RandomVariable(lhs, rhs, Singleton<Multiplication>::get());
+}
+RandomVariable operator*(const RandomVariable& lhs, const float& rhs) {
+ return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs));
+}
+RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(1)
+ : RandomVariable(lhs, rhs, Singleton<Division>::get());
+}
+RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? RandomVariable(0)
+ : RandomVariable(lhs, rhs, Singleton<Modulo>::get());
+}
+RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get());
+}
+RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) {
+ return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get());
+}
+
+RandomVariable RandomVariable::exactDiv(const RandomVariable& other) {
+ return mVar == other.get() ? RandomVariable(1)
+ : RandomVariable(*this, other, Singleton<ExactDivision>::get());
+}
+
+RandomVariable RandomVariable::setEqual(const RandomVariable& other) const {
+ RandomVariableNode node1 = mVar, node2 = other.get();
+ NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index;
+
+ // Do not setEqual on the same pair twice.
+ if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) ||
+ (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) {
+ NN_FUZZER_LOG << "Already equal. Return.";
+ return RandomVariable();
+ }
+
+ // If possible, always try UnaryEqual first to reduce the search space.
+ // UnaryEqual can be used if node B is FREE and is evaluated later than node A.
+ // TODO: Reduce code duplication.
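+    // e.g. if node2 is FREE and evaluated after node1, rewrite node2 in place as
+    // UNARY_EQUAL(node1) rather than creating a separate EQUAL constraint node.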
+ if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) {
+ NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index;
+ node2->type = RandomVariableType::OP;
+ node2->parent1 = node1;
+ node2->op = Singleton<UnaryEqual>::get();
+ node1->children.push_back(node2);
+ RandomVariableNetwork::get()->join(node1, node2);
+ node1->updateTimestamp();
+ return other;
+ }
+ if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) {
+ NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index;
+ node1->type = RandomVariableType::OP;
+ node1->parent1 = node2;
+ node1->op = Singleton<UnaryEqual>::get();
+ node2->children.push_back(node1);
+ RandomVariableNetwork::get()->join(node2, node1);
+ node1->updateTimestamp();
+ return *this;
+ }
+ return RandomVariable(*this, other, Singleton<Equal>::get());
+}
+
+RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const {
+ NN_FUZZER_CHECK(mVar != other.get());
+ return RandomVariable(*this, other, Singleton<GreaterThan>::get());
+}
+RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const {
+ return mVar == other.get() ? *this
+ : RandomVariable(*this, other, Singleton<GreaterEqual>::get());
+}
+
+void DisjointNetwork::add(const RandomVariableNode& var) {
+ // Find the subnet index of the parents and decide the index for var.
+ int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1];
+ int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2];
+ int ind = join(ind1, ind2);
+    // If the variable has no parent, put it into a new subnet.
+ if (ind == -1) ind = mNextIndex++;
+ NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind;
+ mIndexMap[var] = ind;
+ mEvalOrderMap[ind].push_back(var);
+}
+
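+// Merge two subnets, returning the surviving index. Subnet #ind2 is always merged into #ind1,
+// so a single join costs time linear in the size of subnet #ind2.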
+int DisjointNetwork::join(int ind1, int ind2) {
+ if (ind1 == -1) return ind2;
+ if (ind2 == -1) return ind1;
+ if (ind1 == ind2) return ind1;
+ NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2;
+ auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2];
+    // Append every node in subnet #ind2 to the end of subnet #ind1.
+ for (const auto& var : order2) {
+ order1.push_back(var);
+ mIndexMap[var] = ind1;
+ }
+ // Remove ind2 from mEvalOrderMap.
+ mEvalOrderMap.erase(mEvalOrderMap.find(ind2));
+ return ind1;
+}
+
+RandomVariableNetwork* RandomVariableNetwork::get() {
+ static RandomVariableNetwork instance;
+ return &instance;
+}
+
+void RandomVariableNetwork::initialize(int defaultValue) {
+ RandomVariableBase::globalIndex = 0;
+ RandomVariable::defaultValue = defaultValue;
+ mIndexMap.clear();
+ mEvalOrderMap.clear();
+ mDimProd.clear();
+ mNextIndex = 0;
+ mGlobalTime = 0;
+ mTimestamp = -1;
+}
+
+bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1,
+ const RandomVariableNode& node2) {
+ if (node2->type != RandomVariableType::FREE) return false;
+ int ind1 = mIndexMap[node1];
+    // node2 is in a different subnet.
+ if (ind1 != mIndexMap[node2]) return true;
+ for (const auto& node : mEvalOrderMap[ind1]) {
+ if (node == node2) return false;
+        // node2 is in the same subnet but evaluated later than node1.
+ if (node == node1) return true;
+ }
+    NN_FUZZER_CHECK(false) << "Unreachable: both nodes must appear in the evaluation order.";
+ return false;
+}
+
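+// Bookkeeping for one RandomVariable during a range-evaluation pass. Candidate values start in
+// committed, valid combinations are collected in staging, commit() moves staging back into
+// committed, and updateRange() finally writes committed into var->range.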
+struct EvalInfo {
+ // The RandomVariableNode that this EvalInfo is associated with.
+ // var->value is the current value during evaluation.
+ RandomVariableNode var;
+
+ // The RandomVariable value is staged when a valid combination is found.
+ std::set<int> staging;
+
+ // The staging values are committed after a subnet evaluation.
+ std::set<int> committed;
+
+    // Keeps track of the latest timestamp at which committed was updated.
+ int timestamp;
+
+ // For evalSubnetWithLocalNetwork.
+ RandomVariableType originalType;
+
+    // eval() must only be invoked on an OP RandomVariable.
+ bool eval() {
+ NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
+ var->value = var->op->eval(var->parent1->value,
+ var->parent2 == nullptr ? 0 : var->parent2->value);
+ if (var->value == kInvalidValue) return false;
+ return committed.find(var->value) != committed.end();
+ }
+ void stage() { staging.insert(var->value); }
+ void commit() {
+ // Only update committed and timestamp if the range is *indeed* changed.
+ if (staging.size() != committed.size()) {
+ committed = std::move(staging);
+ timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ }
+ staging.clear();
+ }
+ void updateRange() {
+ // Only update range and timestamp if the range is *indeed* changed.
+ if (committed.size() != var->range.size()) {
+ var->range = RandomVariableRange(committed);
+ var->timestamp = timestamp;
+ }
+ committed.clear();
+ }
+
+ EvalInfo(const RandomVariableNode& var)
+ : var(var),
+ committed(var->range.getChoices().begin(), var->range.getChoices().end()),
+ timestamp(var->timestamp) {}
+};
+using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>;
+
+// For logging only.
+inline std::string toString(const RandomVariableNode& var, EvalContext* context) {
+ std::stringstream ss;
+ ss << "var" << var->index << " = ";
+ const auto& committed = context->at(var).committed;
+ switch (var->type) {
+ case RandomVariableType::FREE:
+ ss << "FREE ["
+ << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]";
+ break;
+ case RandomVariableType::CONST:
+ ss << "CONST " << var->value;
+ break;
+ case RandomVariableType::OP:
+ ss << "var" << var->parent1->index << " " << var->op->getName();
+ if (var->parent2 != nullptr) ss << " var" << var->parent2->index;
+ ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end()))
+ << "]";
+ break;
+ default:
+ NN_FUZZER_CHECK(false);
+ }
+ ss << ", timestamp = " << context->at(var).timestamp;
+ return ss.str();
+}
+
+// Check if the subnet needs to be re-evaluated by comparing the timestamps.
+static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime,
+ EvalContext* context = nullptr) {
+ for (const auto& var : evalOrder) {
+ int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp;
+        // If any node has been modified since the last evaluation, the subnet needs to be
+        // re-evaluated.
+ if (timestamp > subnetTime) return true;
+ }
+ return false;
+}
+
+// Helper function to evaluate the subnet recursively.
+// Iterates through all combinations of the FREE RandomVariables' choices.
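+// The worst case visits the full cross product of all FREE choice sets, which is why callers
+// first bound the size with getNumCombinations() below.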
+static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) {
+ if (i == evalOrder.size()) {
+        // Reached the end of the evaluation order: this combination is valid, so stage it.
+ for (auto& var : evalOrder) context->at(var).stage();
+ return;
+ }
+ const auto& var = evalOrder[i];
+ if (var->type == RandomVariableType::FREE) {
+        // For a FREE RandomVariable, iterate through all valid choices.
+ for (int val : context->at(var).committed) {
+ var->value = val;
+ evalSubnetHelper(evalOrder, context, i + 1);
+ }
+ return;
+ } else if (var->type == RandomVariableType::OP) {
+        // For an OP RandomVariable, evaluate from the parents and terminate if the result
+        // is invalid.
+ if (!context->at(var).eval()) return;
+ }
+ evalSubnetHelper(evalOrder, context, i + 1);
+}
+
+// Check if the subnet has exactly one OP RandomVariable.
+static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) {
+ int numOp = 0;
+ for (const auto& var : evalOrder) {
+ if (var->type == RandomVariableType::OP) numOp++;
+ if (numOp > 1) return false;
+ }
+ return numOp != 0;
+}
+
+// Evaluate with a potentially faster approach provided by IRandomVariableOp.
+static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder,
+ EvalContext* context) {
+ NN_FUZZER_LOG << "Identified as single op subnet";
+ const auto& var = evalOrder.back();
+ NN_FUZZER_CHECK(var->type == RandomVariableType::OP);
+ var->op->eval(&context->at(var->parent1).committed,
+ var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed,
+ &context->at(var).committed, &context->at(var->parent1).staging,
+ var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging,
+ &context->at(var).staging);
+}
+
+// Compute the number of combinations of FREE RandomVariable choices, capped at kLimit.
+static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder,
+ EvalContext* context = nullptr) {
+ constexpr uint64_t kLimit = 1e8;
+ uint64_t numCombinations = 1;
+ for (const auto& var : evalOrder) {
+ if (var->type == RandomVariableType::FREE) {
+ size_t size =
+ context == nullptr ? var->range.size() : context->at(var).committed.size();
+ numCombinations *= size;
+ // To prevent overflow.
+ if (numCombinations > kLimit) return kLimit;
+ }
+ }
+ return numCombinations;
+}
+
+// Evaluate the subnet by brute force. Returns false if the number of combinations of FREE
+// RandomVariable choices exceeds the threshold kMaxNumCombinations.
+static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) {
+ constexpr uint64_t kMaxNumCombinations = 1e7;
+ NN_FUZZER_LOG << "Evaluate with brute force";
+ if (isSingleOpSubnet(evalOrder)) {
+        // If the subnet has only a single OP, dispatch to a faster evaluation.
+ evalSubnetSingleOpHelper(evalOrder, context);
+ } else {
+ if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) {
+ NN_FUZZER_LOG << "Terminate the evaluation because of large search range";
+ std::cout << "[ ] Terminate the evaluation because of large search range"
+ << std::endl;
+ return false;
+ }
+ evalSubnetHelper(evalOrder, context);
+ }
+ for (auto& var : evalOrder) {
+ if (context->at(var).staging.empty()) {
+ NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context);
+ return false;
+ }
+ context->at(var).commit();
+ }
+ return true;
+}
+
+struct LocalNetwork {
+ EvaluationOrder evalOrder;
+ std::vector<RandomVariableNode> bridgeNodes;
+ int timestamp = 0;
+
+ bool eval(EvalContext* context) {
+ NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp;
+ // Temporarily treat bridge nodes as FREE RandomVariables.
+ for (const auto& var : bridgeNodes) {
+ context->at(var).originalType = var->type;
+ var->type = RandomVariableType::FREE;
+ }
+ for (const auto& var : evalOrder) {
+ context->at(var).staging.clear();
+ NN_FUZZER_LOG << " - " << toString(var, context);
+ }
+ bool success = evalSubnetWithBruteForce(evalOrder, context);
+ // Reset the RandomVariable types for bridge nodes.
+ for (const auto& var : bridgeNodes) var->type = context->at(var).originalType;
+ return success;
+ }
+};
+
+// Partition the network further into LocalNetworks based on the result of the bridge
+// annotation algorithm.
+class GraphPartitioner : public DisjointNetwork {
+ public:
+ GraphPartitioner() = default;
+
+ std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) {
+ annotateBridge(evalOrder);
+ for (const auto& var : evalOrder) add(var);
+ return get(timestamp);
+ }
+
+ private:
+ GraphPartitioner(const GraphPartitioner&) = delete;
+ GraphPartitioner& operator=(const GraphPartitioner&) = delete;
+
+    // Find the parent-child relationship between var1 and var2, and set the corresponding
+    // bridge flag.
+ void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) {
+ if (var1->parent1 == var2) {
+ mBridgeInfo[var1].isParent1Bridge = true;
+ } else if (var1->parent2 == var2) {
+ mBridgeInfo[var1].isParent2Bridge = true;
+ } else {
+ setBridgeFlag(var2, var1);
+ }
+ }
+
+    // Annotate the bridges with DFS -- an edge [u, v] is a bridge if no ancestor of u is
+    // reachable from any node in the subtree of v. The complexity is O(V + E).
+    // discoveryTime: The timestamp at which a node is first visited.
+    // lowTime: The minimum discovery time over all nodes reachable from the subtree of the node.
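+    // e.g. in a chain u - v - w both edges are bridges; in a cycle u - v - w - u none is,
+    // since every subtree reaches an ancestor through the back edge.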
+ void annotateBridgeHelper(const RandomVariableNode& var, int* time) {
+ mBridgeInfo[var].visited = true;
+ mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++;
+
+        // The algorithm operates on an undirected graph. First gather all adjacent nodes.
+ auto adj = var->children;
+ if (var->parent1 != nullptr) adj.push_back(var->parent1);
+ if (var->parent2 != nullptr) adj.push_back(var->parent2);
+
+ for (const auto& weakChild : adj) {
+ auto child = weakChild.lock();
+ NN_FUZZER_CHECK(child != nullptr);
+ if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue;
+ if (!mBridgeInfo[child].visited) {
+ mBridgeInfo[child].parent = var;
+ annotateBridgeHelper(child, time);
+
+                // If no node in the subtree of child is connected to any ancestor of var,
+                // then the edge [var, child] is a bridge.
+ mBridgeInfo[var].lowTime =
+ std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime);
+ if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime)
+ setBridgeFlag(var, child);
+ } else if (mBridgeInfo[var].parent != child) {
+ mBridgeInfo[var].lowTime =
+ std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime);
+ }
+ }
+ }
+
+ // Find all bridges in the subnet with DFS.
+ void annotateBridge(const EvaluationOrder& evalOrder) {
+ for (const auto& var : evalOrder) mBridgeInfo[var];
+ int time = 0;
+ for (const auto& var : evalOrder) {
+ if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time);
+ }
+ }
+
+    // Re-partition the network by treating bridge edges as removed.
+ void add(const RandomVariableNode& var) {
+ auto parent1 = var->parent1;
+ auto parent2 = var->parent2;
+ if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr;
+ if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr;
+ DisjointNetwork::add(var);
+ var->parent1 = parent1;
+ var->parent2 = parent2;
+ }
+
+    // Add bridge nodes to the local networks and drop single-node subnets.
+ std::vector<LocalNetwork> get(int timestamp) {
+ std::vector<LocalNetwork> res;
+ for (auto& pair : mEvalOrderMap) {
+            // We do not need to evaluate a subnet with only a single node.
+ if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue;
+ res.emplace_back();
+ for (const auto& var : pair.second) {
+ if (mBridgeInfo[var].isParent1Bridge) {
+ res.back().evalOrder.push_back(var->parent1);
+ res.back().bridgeNodes.push_back(var->parent1);
+ }
+ if (mBridgeInfo[var].isParent2Bridge) {
+ res.back().evalOrder.push_back(var->parent2);
+ res.back().bridgeNodes.push_back(var->parent2);
+ }
+ res.back().evalOrder.push_back(var);
+ }
+ res.back().timestamp = timestamp;
+ }
+ return res;
+ }
+
+    // Bookkeeping for the bridge discovery algorithm.
+ struct BridgeInfo {
+ bool isParent1Bridge = false;
+ bool isParent2Bridge = false;
+ int discoveryTime = 0;
+ int lowTime = 0;
+ bool visited = false;
+ std::shared_ptr<RandomVariableBase> parent = nullptr;
+ };
+ std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo;
+};
+
+// Evaluate subnets repeatedly until convergence.
+// Class T_Subnet must have members evalOrder and timestamp, and a member function eval.
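+// This is a fixed-point iteration: a subnet is re-evaluated only if some variable in it carries
+// a newer timestamp than the subnet itself, so the loop terminates once no range changes.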
+template <class T_Subnet>
+inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) {
+ bool terminate = false;
+ while (!terminate) {
+ terminate = true;
+ for (auto& subnet : *subnets) {
+ if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) {
+ if (!subnet.eval(context)) return false;
+ subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ terminate = false;
+ }
+ }
+ }
+ return true;
+}
+
+// Evaluate the subnet by first partitioning it further into LocalNetworks.
+static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp,
+ EvalContext* context) {
+ NN_FUZZER_LOG << "Evaluate with local network";
+ auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp);
+ return evalSubnetsRepeatedly(&localNetworks, context);
+}
+
+struct LeafNetwork {
+ EvaluationOrder evalOrder;
+ int timestamp = 0;
+ LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) {
+ std::set<RandomVariableNode> visited;
+ constructorHelper(var, &visited);
+ }
+ // Construct the leaf network by recursively including parent nodes.
+ void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) {
+ if (var == nullptr || visited->find(var) != visited->end()) return;
+ constructorHelper(var->parent1, visited);
+ constructorHelper(var->parent2, visited);
+ visited->insert(var);
+ evalOrder.push_back(var);
+ }
+ bool eval(EvalContext* context) {
+ return evalSubnetWithLocalNetwork(evalOrder, timestamp, context);
+ }
+};
+
+// Evaluate the subnet with leaf networks.
+// NOTE: This algorithm only produces a correct result *most* of the time (> 99%).
+// The random graph generator is expected to retry if it fails.
+static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp,
+ EvalContext* context) {
+ NN_FUZZER_LOG << "Evaluate with leaf network";
+ // Construct leaf networks.
+ std::vector<LeafNetwork> leafNetworks;
+ for (const auto& var : evalOrder) {
+ if (var->children.empty()) {
+ NN_FUZZER_LOG << "Found leaf " << toString(var, context);
+ leafNetworks.emplace_back(var, timestamp);
+ }
+ }
+ return evalSubnetsRepeatedly(&leafNetworks, context);
+}
+
+void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) {
+ if (dims.size() <= 1) return;
+ EvaluationOrder order;
+ for (const auto& dim : dims) order.push_back(dim.get());
+ mDimProd.push_back(order);
+}
+
+bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd,
+ const std::unordered_map<RandomVariableNode, int>& indexMap,
+ EvalContext* context, std::set<int>* dirtySubnets) {
+ for (auto& evalOrder : mDimProd) {
+ NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size();
+ // Initialize EvalInfo of each RandomVariable.
+ for (auto& var : evalOrder) {
+ if (context->find(var) == context->end()) context->emplace(var, var);
+ NN_FUZZER_LOG << " - " << toString(var, context);
+ }
+
+        // Enforce that the product of the dimension values stays below kMaxValue:
+        // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...)
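+        // e.g. for dims {A, B, C} with min(B) = 2 and min(C) = 3, every choice of A greater
+        // than kMaxValue / 6 is erased from A's committed set.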
+ int prod = 1;
+ for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin());
+ for (auto& var : evalOrder) {
+ auto& committed = context->at(var).committed;
+ int maxValue = kMaxValue / (prod / *committed.begin());
+ auto it = committed.upper_bound(maxValue);
+ // var has empty range -> no solution.
+ if (it == committed.begin()) return false;
+ // The range is not modified -> continue.
+ if (it == committed.end()) continue;
+ // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation.
+ committed.erase(it, committed.end());
+ context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime();
+ dirtySubnets->insert(indexMap.at(var));
+ }
+ }
+ return true;
+}
+
+bool RandomVariableNetwork::evalRange() {
+ constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500;
+ constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5;
+ NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks";
+ EvalContext context;
+    std::set<int> dirtySubnets;  // Which subnets need evaluation.
+ for (auto& pair : mEvalOrderMap) {
+ const auto& evalOrder = pair.second;
+        // Decide by timestamp whether the subnet needs evaluation -- if no range has changed
+        // since the last evaluation, the subnet does not need re-evaluation.
+ if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue;
+ dirtySubnets.insert(pair.first);
+ }
+ if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
+
+ // Repeat until the ranges converge.
+ while (!dirtySubnets.empty()) {
+ for (int ind : dirtySubnets) {
+ const auto& evalOrder = mEvalOrderMap[ind];
+ NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size();
+
+ // Initialize EvalInfo of each RandomVariable.
+ for (auto& var : evalOrder) {
+ if (context.find(var) == context.end()) context.emplace(var, var);
+ NN_FUZZER_LOG << " - " << toString(var, &context);
+ }
+
+            // Dispatch to different algorithms according to the size of the search range.
+ bool success;
+ uint64_t numCombinations = getNumCombinations(evalOrder);
+ if (numCombinations <= kMaxNumCombinationsWithBruteForce) {
+ success = evalSubnetWithBruteForce(evalOrder, &context);
+ } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) {
+ success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context);
+ } else {
+ success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context);
+ }
+ if (!success) return false;
+ }
+ dirtySubnets.clear();
+ if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false;
+ }
+    // The evaluation succeeded; update the RandomVariables from the EvalContext.
+ for (auto& pair : context) pair.second.updateRange();
+ mTimestamp = getGlobalTime();
+ NN_FUZZER_LOG << "Finish range evaluation";
+ return true;
+}
+
+static void unsetEqual(const RandomVariableNode& node) {
+ if (node == nullptr) return;
+ NN_FUZZER_LOG << "Unset equality of var" << node->index;
+ auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) {
+ return ptr.lock() == node;
+ };
+ RandomVariableNode parent1 = node->parent1, parent2 = node->parent2;
+ parent1->children.erase(
+ std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual));
+ node->parent1 = nullptr;
+ if (parent2 != nullptr) {
+ // For Equal.
+ parent2->children.erase(
+ std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual));
+ node->parent2 = nullptr;
+ } else {
+ // For UnaryEqual.
+ node->type = RandomVariableType::FREE;
+ node->op = nullptr;
+ }
+}
+
+// A class to revert all the changes made to RandomVariableNetwork since the Reverter object
+// was constructed. Only used when setEqualIfCompatible finds the operands incompatible.
+class RandomVariableNetwork::Reverter {
+ public:
+ // Take a snapshot of RandomVariableNetwork when Reverter is constructed.
+ Reverter() : mSnapshot(*RandomVariableNetwork::get()) {}
+ // Add constraint (Equal) nodes to the reverter.
+ void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); }
+ void revert() {
+ NN_FUZZER_LOG << "Revert RandomVariableNetwork";
+ // Release the constraints.
+ for (const auto& node : mEqualNodes) unsetEqual(node);
+ // Reset all member variables.
+ *RandomVariableNetwork::get() = std::move(mSnapshot);
+ }
+
+ private:
+ Reverter(const Reverter&) = delete;
+ Reverter& operator=(const Reverter&) = delete;
+ RandomVariableNetwork mSnapshot;
+ std::vector<RandomVariableNode> mEqualNodes;
+};
+
+bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs,
+ const std::vector<RandomVariable>& rhs) {
+ NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {"
+ << joinStr(", ", rhs) << "}";
+ if (lhs.size() != rhs.size()) return false;
+ Reverter reverter;
+ bool result = true;
+ for (size_t i = 0; i < lhs.size(); i++) {
+ auto node = lhs[i].setEqual(rhs[i]).get();
+ reverter.addNode(node);
+        // Terminate early if there is no common choice between the two ranges.
+ if (node != nullptr && node->range.empty()) result = false;
+ }
+ result = result && evalRange();
+ if (!result) reverter.revert();
+ NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]");
+ return result;
+}
+
+bool RandomVariableNetwork::freeze() {
+ NN_FUZZER_LOG << "Freeze the random network";
+ if (!evalRange()) return false;
+
+ std::vector<RandomVariableNode> nodes;
+ for (const auto& pair : mEvalOrderMap) {
+ // Find all FREE RandomVariables in the subnet.
+ for (const auto& var : pair.second) {
+ if (var->type == RandomVariableType::FREE) nodes.push_back(var);
+ }
+ }
+
+    // Randomly shuffle the order for more uniform randomness.
+ randomShuffle(&nodes);
+
+ // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable.
+ // TODO: Might be able to optimize this.
+ for (const auto& var : nodes) {
+ if (var->type != RandomVariableType::FREE) continue;
+ size_t size = var->range.size();
+ NN_FUZZER_LOG << "Freeze " << var;
+ var->freeze();
+ NN_FUZZER_LOG << " " << var;
+        // There is no need to re-evaluate if the FREE RandomVariable has only one choice.
+ if (size > 1) {
+ var->updateTimestamp();
+ if (!evalRange()) {
+ NN_FUZZER_LOG << "Freeze failed at " << var;
+ return false;
+ }
+ }
+ }
+ NN_FUZZER_LOG << "Finish freezing the random network";
+ return true;
+}
+
+} // namespace fuzzing_test
+} // namespace nn
+} // namespace android
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 2047cbe04..6e71652a9 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -41,7 +41,6 @@
#include "SampleDriverFull.h"
using android::nn::sample_driver::SampleDriverFull;
-using namespace android::nn::hal;
#endif
@@ -66,27 +65,27 @@ class TestDriverV1_1 : public V1_1::IDevice {
TestDriverV1_1()
: mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {}
static constexpr char name[] = "TestDriverV1_1";
- Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
}
- Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
- getSupportedOperations_1_1_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations_1_1(
+ const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel_1_1(
- const V1_1::Model& model, ExecutionPreference preference,
+ hardware::Return<V1_0::ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel(model, actualCallback);
@@ -102,19 +101,19 @@ class TestDriverV1_0 : public V1_0::IDevice {
TestDriverV1_0()
: mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {}
static constexpr char name[] = "TestDriverV1_0";
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
return mDriverV1_2->getCapabilities(_hidl_cb);
}
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
+ hardware::Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
}
- Return<V1_0::ErrorStatus> prepareModel(
+ hardware::Return<V1_0::ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
return mDriverV1_2->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
private:
const sp<V1_2::IDevice> mDriverV1_2;
diff --git a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
index 8fa93327f..53b5aad17 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
+++ b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -310,7 +310,7 @@ inline void defaultScalarOperandConstructor(TestOperandType dataType, uint32_t,
op->zeroPoint = 0;
break;
default:
- NN_FUZZER_CHECK(false) << "Data type " << toString(dataType)
+ NN_FUZZER_CHECK(false) << "Data type " << dataType
<< " is not supported in defaultScalarOperandConstructor.";
}
}