author     Michael Butler <butlermichael@google.com>    2019-09-07 19:06:54 -0700
committer  Michael Butler <butlermichael@google.com>    2019-09-11 11:06:43 -0700
commit     4906200354133ef0565eb599de7e88a3432d5a11 (patch)
tree       aa23c55dfd21516d923cd05a2cecb8987fbdb0c1
parent     ceecaab5179b49babcfa686567554276c38be305 (diff)
download   ml-4906200354133ef0565eb599de7e88a3432d5a11.tar.gz
Cleanup CacheToken and HalInterfaces
Prior to this CL, many different NN runtime files declared their own HidlToken alias. This CL relocates the definition to HalInterfaces.h and renames it to CacheToken. This CL additionally removes the redundant hardware::neuralnetworks:: qualifier from the using-declarations in HalInterfaces.h, reusing the existing V1_0/V1_1/V1_2 namespace aliases instead.

Bug: N/A
Test: mma
Change-Id: Ic7dc75b982d9d511a357f0ed5cb4bea1e79b0345
Merged-In: Ic7dc75b982d9d511a357f0ed5cb4bea1e79b0345
(cherry picked from commit a34d9c7f0c58362d0c345e38f130e2b380e93b4b)
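In effect (an illustrative summary, not part of the diff itself), the per-file alias repeated across the runtime is replaced by a single alias in the hal namespace:

// Before: repeated in ExecutionBuilder.cpp, ExecutionPlan.cpp, Manager.cpp,
// VersionedInterfaces.cpp, SampleDriver.h, and several tests.
using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;

// After: declared once in nn/common/include/HalInterfaces.h (namespace android::nn::hal).
using CacheToken =
        hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;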
-rw-r--r--   nn/common/include/HalInterfaces.h              71
-rw-r--r--   nn/driver/sample/SampleDriver.cpp               4
-rw-r--r--   nn/driver/sample/SampleDriver.h                 5
-rw-r--r--   nn/runtime/ExecutionBuilder.cpp                 2
-rw-r--r--   nn/runtime/ExecutionPlan.cpp                    6
-rw-r--r--   nn/runtime/Manager.cpp                         16
-rw-r--r--   nn/runtime/Manager.h                           21
-rw-r--r--   nn/runtime/VersionedInterfaces.cpp              6
-rw-r--r--   nn/runtime/VersionedInterfaces.h               10
-rw-r--r--   nn/runtime/test/TestCompilationCaching.cpp      5
-rw-r--r--   nn/runtime/test/TestExecution.cpp               3
-rw-r--r--   nn/runtime/test/TestIntrospectionControl.cpp    3
-rw-r--r--   nn/runtime/test/TestPartitioning.cpp            5
-rw-r--r--   nn/runtime/test/TestPartitioningRandom.cpp      3
14 files changed, 72 insertions, 88 deletions
diff --git a/nn/common/include/HalInterfaces.h b/nn/common/include/HalInterfaces.h
index 6df2ebb22..5b48ecbba 100644
--- a/nn/common/include/HalInterfaces.h
+++ b/nn/common/include/HalInterfaces.h
@@ -36,7 +36,7 @@
namespace android::nn::hal {
using android::sp;
-using hardware::hidl_array;
+
using hardware::hidl_death_recipient;
using hardware::hidl_enum_range;
using hardware::hidl_handle;
@@ -45,38 +45,7 @@ using hardware::hidl_string;
using hardware::hidl_vec;
using hardware::Return;
using hardware::Void;
-using hardware::neuralnetworks::V1_0::DataLocation;
-using hardware::neuralnetworks::V1_0::DeviceStatus;
-using hardware::neuralnetworks::V1_0::ErrorStatus;
-using hardware::neuralnetworks::V1_0::FusedActivationFunc;
-using hardware::neuralnetworks::V1_0::OperandLifeTime;
-using hardware::neuralnetworks::V1_0::PerformanceInfo;
-using hardware::neuralnetworks::V1_0::Request;
-using hardware::neuralnetworks::V1_0::RequestArgument;
-using hardware::neuralnetworks::V1_1::ExecutionPreference;
-using hardware::neuralnetworks::V1_2::Capabilities;
-using hardware::neuralnetworks::V1_2::Constant;
-using hardware::neuralnetworks::V1_2::DeviceType;
-using hardware::neuralnetworks::V1_2::Extension;
-using hardware::neuralnetworks::V1_2::FmqRequestDatum;
-using hardware::neuralnetworks::V1_2::FmqResultDatum;
-using hardware::neuralnetworks::V1_2::IBurstCallback;
-using hardware::neuralnetworks::V1_2::IBurstContext;
-using hardware::neuralnetworks::V1_2::IDevice;
-using hardware::neuralnetworks::V1_2::IExecutionCallback;
-using hardware::neuralnetworks::V1_2::IPreparedModel;
-using hardware::neuralnetworks::V1_2::IPreparedModelCallback;
-using hardware::neuralnetworks::V1_2::MeasureTiming;
-using hardware::neuralnetworks::V1_2::Model;
-using hardware::neuralnetworks::V1_2::Operand;
-using hardware::neuralnetworks::V1_2::OperandType;
-using hardware::neuralnetworks::V1_2::OperandTypeRange;
-using hardware::neuralnetworks::V1_2::Operation;
-using hardware::neuralnetworks::V1_2::OperationType;
-using hardware::neuralnetworks::V1_2::OperationTypeRange;
-using hardware::neuralnetworks::V1_2::OutputShape;
-using hardware::neuralnetworks::V1_2::SymmPerChannelQuantParams;
-using hardware::neuralnetworks::V1_2::Timing;
+
using hidl::allocator::V1_0::IAllocator;
using hidl::memory::V1_0::IMemory;
@@ -84,6 +53,42 @@ namespace V1_0 = hardware::neuralnetworks::V1_0;
namespace V1_1 = hardware::neuralnetworks::V1_1;
namespace V1_2 = hardware::neuralnetworks::V1_2;
+using V1_0::DataLocation;
+using V1_0::DeviceStatus;
+using V1_0::ErrorStatus;
+using V1_0::FusedActivationFunc;
+using V1_0::OperandLifeTime;
+using V1_0::PerformanceInfo;
+using V1_0::Request;
+using V1_0::RequestArgument;
+using V1_1::ExecutionPreference;
+using V1_2::Capabilities;
+using V1_2::Constant;
+using V1_2::DeviceType;
+using V1_2::Extension;
+using V1_2::FmqRequestDatum;
+using V1_2::FmqResultDatum;
+using V1_2::IBurstCallback;
+using V1_2::IBurstContext;
+using V1_2::IDevice;
+using V1_2::IExecutionCallback;
+using V1_2::IPreparedModel;
+using V1_2::IPreparedModelCallback;
+using V1_2::MeasureTiming;
+using V1_2::Model;
+using V1_2::Operand;
+using V1_2::OperandType;
+using V1_2::OperandTypeRange;
+using V1_2::Operation;
+using V1_2::OperationType;
+using V1_2::OperationTypeRange;
+using V1_2::OutputShape;
+using V1_2::SymmPerChannelQuantParams;
+using V1_2::Timing;
+
+using CacheToken =
+ hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
} // namespace android::nn::hal
#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_INTERFACES_H
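For context, a minimal usage sketch of the consolidated alias (not part of this CL; the helper name is hypothetical, and it assumes the NDK constant ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN matches Constant::BYTE_SIZE_OF_CACHE_TOKEN, as the runtime code further below relies on). It mirrors how compileFromCache/compileModelAndCache in ExecutionPlan.cpp construct the token:

#include <cstdint>

#include "HalInterfaces.h"

// Hypothetical helper: wrap a raw token buffer in the shared alias.
// hidl_array's pointer constructor copies BYTE_SIZE_OF_CACHE_TOKEN bytes from `token`.
android::nn::hal::CacheToken makeCacheToken(const uint8_t* token) {
    return android::nn::hal::CacheToken(token);
}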
diff --git a/nn/driver/sample/SampleDriver.cpp b/nn/driver/sample/SampleDriver.cpp
index 0cc2d256e..701eab630 100644
--- a/nn/driver/sample/SampleDriver.cpp
+++ b/nn/driver/sample/SampleDriver.cpp
@@ -187,14 +187,14 @@ Return<ErrorStatus> SampleDriver::prepareModel_1_1(
Return<ErrorStatus> SampleDriver::prepareModel_1_2(
const V1_2::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
- const hidl_vec<hidl_handle>&, const HidlToken&,
+ const hidl_vec<hidl_handle>&, const CacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_2");
return prepareModelBase(model, this, preference, callback);
}
Return<ErrorStatus> SampleDriver::prepareModelFromCache(
- const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const HidlToken&,
+ const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
"SampleDriver::prepareModelFromCache");
diff --git a/nn/driver/sample/SampleDriver.h b/nn/driver/sample/SampleDriver.h
index 0e1839b95..eb7b4688b 100644
--- a/nn/driver/sample/SampleDriver.h
+++ b/nn/driver/sample/SampleDriver.h
@@ -28,7 +28,6 @@ namespace nn {
namespace sample_driver {
using hardware::MQDescriptorSync;
-using HidlToken = hal::hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
// Base class used to create sample drivers for the NN HAL. This class
// provides some implementation of the more common functions.
@@ -62,11 +61,11 @@ class SampleDriver : public hal::IDevice {
hal::Return<hal::ErrorStatus> prepareModel_1_2(
const hal::V1_2::Model& model, hal::ExecutionPreference preference,
const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const HidlToken& token,
+ const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
hal::Return<hal::ErrorStatus> prepareModelFromCache(
const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache, const HidlToken& token,
+ const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
hal::Return<hal::DeviceStatus> getStatus() override;
diff --git a/nn/runtime/ExecutionBuilder.cpp b/nn/runtime/ExecutionBuilder.cpp
index c641d3243..ce6ae5ac8 100644
--- a/nn/runtime/ExecutionBuilder.cpp
+++ b/nn/runtime/ExecutionBuilder.cpp
@@ -43,8 +43,6 @@ namespace nn {
using namespace hal;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
-
const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
static MeasureTiming measureTiming(const ExecutionBuilder* execution) {
diff --git a/nn/runtime/ExecutionPlan.cpp b/nn/runtime/ExecutionPlan.cpp
index afce316e0..fe7e55d7b 100644
--- a/nn/runtime/ExecutionPlan.cpp
+++ b/nn/runtime/ExecutionPlan.cpp
@@ -58,8 +58,6 @@ namespace {
using namespace hal;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
-
// Opens cache file by filename and sets the handle to the opened fd. Returns false on fail. The
// handle is expected to come in as empty, and is only set to a fd when the function returns true.
// The file descriptor is always opened with both read and write permission.
@@ -128,7 +126,7 @@ bool compileFromCache(const std::shared_ptr<Device>& device, const std::string&
CHECK(token != nullptr && device != nullptr);
VLOG(COMPILATION) << "compileFromCache";
*preparedModel = nullptr;
- HidlToken cacheToken(token);
+ CacheToken cacheToken(token);
hidl_vec<hidl_handle> modelCache, dataCache;
NN_RET_CHECK(getCacheHandles(cacheDir, token, device->getNumberOfCacheFilesNeeded(),
/*createIfNotExist=*/false, &modelCache, &dataCache));
@@ -142,7 +140,7 @@ int compileModelAndCache(const std::shared_ptr<Device>& device, const ModelBuild
CHECK(device != nullptr);
*preparedModel = nullptr;
uint8_t dummyToken[ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN] = {0};
- HidlToken cacheToken(token == nullptr ? dummyToken : token);
+ CacheToken cacheToken(token == nullptr ? dummyToken : token);
hidl_vec<hidl_handle> modelCache, dataCache;
if (token == nullptr || !getCacheHandles(cacheDir, token, device->getNumberOfCacheFilesNeeded(),
/*createIfNotExist=*/true, &modelCache, &dataCache)) {
diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp
index c48e66140..c8e691a9a 100644
--- a/nn/runtime/Manager.cpp
+++ b/nn/runtime/Manager.cpp
@@ -47,8 +47,6 @@ namespace nn {
using namespace hal;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
-
const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
bool Device::isCachingSupported() const {
@@ -85,10 +83,10 @@ class DriverDevice : public Device {
int prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const HidlToken& token,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
std::shared_ptr<PreparedModel>* preparedModel) const override;
int prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const HidlToken& token,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
std::shared_ptr<PreparedModel>* preparedModel) const override;
private:
@@ -282,7 +280,7 @@ static int prepareModelCheck(ErrorStatus status,
int DriverDevice::prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const HidlToken& token,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
std::shared_ptr<PreparedModel>* preparedModel) const {
// Note that some work within VersionedIDevice will be subtracted from the IPC layer
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModel");
@@ -295,7 +293,7 @@ int DriverDevice::prepareModel(const Model& hidlModel, ExecutionPreference execu
int DriverDevice::prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
const hidl_vec<hidl_handle>& dataCache,
- const HidlToken& token,
+ const CacheToken& token,
std::shared_ptr<PreparedModel>* preparedModel) const {
// Note that some work within VersionedIDevice will be subtracted from the IPC layer
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModelFromCache");
@@ -522,10 +520,10 @@ class CpuDevice : public Device {
int prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const HidlToken&,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken&,
std::shared_ptr<PreparedModel>* preparedModel) const override;
int prepareModelFromCache(const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
- const HidlToken&, std::shared_ptr<PreparedModel>*) const override {
+ const CacheToken&, std::shared_ptr<PreparedModel>*) const override {
CHECK(false) << "Should never call prepareModelFromCache on CpuDevice";
return ANEURALNETWORKS_OP_FAILED;
}
@@ -586,7 +584,7 @@ void CpuDevice::getSupportedOperations(const MetaModel& metaModel,
int CpuDevice::prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const HidlToken&,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken&,
std::shared_ptr<PreparedModel>* preparedModel) const {
CHECK(modelCache.size() == 0 && dataCache.size() == 0)
<< "Should never call prepareModel with cache information on CpuDevice";
diff --git a/nn/runtime/Manager.h b/nn/runtime/Manager.h
index 691f1be53..c239f1c64 100644
--- a/nn/runtime/Manager.h
+++ b/nn/runtime/Manager.h
@@ -84,17 +84,16 @@ class Device {
virtual std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const = 0;
bool isCachingSupported() const;
- virtual int prepareModel(
- const hal::Model& hidlModel, hal::ExecutionPreference executionPreference,
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache,
- const hal::hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>& token,
- std::shared_ptr<PreparedModel>* preparedModel) const = 0;
- virtual int prepareModelFromCache(
- const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache,
- const hal::hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>& token,
- std::shared_ptr<PreparedModel>* preparedModel) const = 0;
+ virtual int prepareModel(const hal::Model& hidlModel,
+ hal::ExecutionPreference executionPreference,
+ const hal::hidl_vec<hal::hidl_handle>& modelCache,
+ const hal::hidl_vec<hal::hidl_handle>& dataCache,
+ const hal::CacheToken& token,
+ std::shared_ptr<PreparedModel>* preparedModel) const = 0;
+ virtual int prepareModelFromCache(const hal::hidl_vec<hal::hidl_handle>& modelCache,
+ const hal::hidl_vec<hal::hidl_handle>& dataCache,
+ const hal::CacheToken& token,
+ std::shared_ptr<PreparedModel>* preparedModel) const = 0;
};
// Manages the NN HAL devices. Only one instance of this class will exist.
diff --git a/nn/runtime/VersionedInterfaces.cpp b/nn/runtime/VersionedInterfaces.cpp
index 4c4a33ecb..a414bdf44 100644
--- a/nn/runtime/VersionedInterfaces.cpp
+++ b/nn/runtime/VersionedInterfaces.cpp
@@ -100,8 +100,6 @@ namespace {
using namespace hal;
-using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
-
const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
void sendFailureMessage(const sp<IPreparedModelCallback>& cb) {
@@ -662,7 +660,7 @@ std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations(
std::pair<ErrorStatus, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepareModel(
const Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>& modelCache,
- const hidl_vec<hidl_handle>& dataCache, const HidlToken& token) {
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token) {
const std::pair<ErrorStatus, std::shared_ptr<VersionedIPreparedModel>> kFailure = {
ErrorStatus::GENERAL_FAILURE, nullptr};
@@ -781,7 +779,7 @@ std::pair<ErrorStatus, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevic
std::pair<ErrorStatus, std::shared_ptr<VersionedIPreparedModel>>
VersionedIDevice::prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
const hidl_vec<hidl_handle>& dataCache,
- const HidlToken& token) {
+ const CacheToken& token) {
const std::pair<ErrorStatus, std::shared_ptr<VersionedIPreparedModel>> kFailure = {
ErrorStatus::GENERAL_FAILURE, nullptr};
diff --git a/nn/runtime/VersionedInterfaces.h b/nn/runtime/VersionedInterfaces.h
index 34202017e..3572e4119 100644
--- a/nn/runtime/VersionedInterfaces.h
+++ b/nn/runtime/VersionedInterfaces.h
@@ -241,10 +241,7 @@ class VersionedIDevice {
std::pair<hal::ErrorStatus, std::shared_ptr<VersionedIPreparedModel>> prepareModel(
const hal::Model& model, hal::ExecutionPreference preference,
const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache,
- const hal::hidl_array<uint8_t,
- static_cast<uint32_t>(hal::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>&
- token);
+ const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token);
/**
* Creates a prepared model from cache files for execution.
@@ -314,10 +311,7 @@ class VersionedIDevice {
*/
std::pair<hal::ErrorStatus, std::shared_ptr<VersionedIPreparedModel>> prepareModelFromCache(
const hal::hidl_vec<hal::hidl_handle>& modelCache,
- const hal::hidl_vec<hal::hidl_handle>& dataCache,
- const hal::hidl_array<uint8_t,
- static_cast<uint32_t>(hal::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>&
- token);
+ const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token);
/**
* Returns the current status of a driver.
diff --git a/nn/runtime/test/TestCompilationCaching.cpp b/nn/runtime/test/TestCompilationCaching.cpp
index e180b2880..e0e29871a 100644
--- a/nn/runtime/test/TestCompilationCaching.cpp
+++ b/nn/runtime/test/TestCompilationCaching.cpp
@@ -28,7 +28,6 @@ using namespace android::nn;
using namespace hal;
using Result = test_wrapper::Result;
using Type = test_wrapper::Type;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
template <typename T>
using MQDescriptorSync = ::android::hardware::MQDescriptorSync<T>;
@@ -141,7 +140,7 @@ class CachingDriver : public sample_driver::SampleDriver {
Return<ErrorStatus> prepareModel_1_2(const Model&, ExecutionPreference,
const hidl_vec<hidl_handle>& modelCacheHandle,
const hidl_vec<hidl_handle>& dataCacheHandle,
- const HidlToken&,
+ const CacheToken&,
const sp<IPreparedModelCallback>& cb) override {
checkNumberOfCacheHandles(modelCacheHandle.size(), dataCacheHandle.size());
if (modelCacheHandle.size() != 0 || dataCacheHandle.size() != 0) {
@@ -159,7 +158,7 @@ class CachingDriver : public sample_driver::SampleDriver {
// mErrorStatusPrepareFromCache, sets mHasCalledPrepareModelFromCache.
Return<ErrorStatus> prepareModelFromCache(
const hidl_vec<hidl_handle>& modelCacheHandle,
- const hidl_vec<hidl_handle>& dataCacheHandle, const HidlToken&,
+ const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) override {
readFromCache(modelCacheHandle, mModelCacheData);
readFromCache(dataCacheHandle, mDataCacheData);
diff --git a/nn/runtime/test/TestExecution.cpp b/nn/runtime/test/TestExecution.cpp
index e25d2ff7e..e4c8d2168 100644
--- a/nn/runtime/test/TestExecution.cpp
+++ b/nn/runtime/test/TestExecution.cpp
@@ -39,7 +39,6 @@ using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
using HidlModel = V1_2::Model;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
using PreparedModelCallback = nn::PreparedModelCallback;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
@@ -186,7 +185,7 @@ class TestDriver12 : public SampleDriver {
Return<ErrorStatus> prepareModel_1_2(
const HidlModel& model, ExecutionPreference preference,
const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
- const HidlToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
+ const CacheToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
Return<ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
model, preference, modelCache, dataCache, token, localCallback);
diff --git a/nn/runtime/test/TestIntrospectionControl.cpp b/nn/runtime/test/TestIntrospectionControl.cpp
index ab7c314f5..e25769af3 100644
--- a/nn/runtime/test/TestIntrospectionControl.cpp
+++ b/nn/runtime/test/TestIntrospectionControl.cpp
@@ -43,7 +43,6 @@ using DeviceManager = nn::DeviceManager;
using ExecutePreference = nn::test_wrapper::ExecutePreference;
using ExecutionBurstServer = nn::ExecutionBurstServer;
using HidlModel = V1_2::Model;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
using PreparedModelCallback = nn::PreparedModelCallback;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
@@ -437,7 +436,7 @@ class TestDriver12 : public SampleDriver {
Return<ErrorStatus> prepareModel_1_2(const HidlModel& model, ExecutionPreference,
const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
- const HidlToken&,
+ const CacheToken&,
const sp<IPreparedModelCallback>& callback) override {
callback->notify_1_2(ErrorStatus::NONE, new TestPreparedModel12(model, this, mSuccess));
return ErrorStatus::NONE;
diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp
index c4f792eb6..11ec94479 100644
--- a/nn/runtime/test/TestPartitioning.cpp
+++ b/nn/runtime/test/TestPartitioning.cpp
@@ -139,7 +139,6 @@ using ExecutionPlan = ::android::nn::ExecutionPlan;
using ExecutionStep = ::android::nn::ExecutionStep;
using HalVersion = ::android::nn::HalVersion;
using HidlModel = V1_2::Model;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
using ModelBuilder = ::android::nn::ModelBuilder;
using Result = ::android::nn::test_wrapper::Result;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
@@ -329,7 +328,7 @@ class PartitioningDriver : public SampleDriver {
Return<ErrorStatus> prepareModel_1_2(const Model& model, ExecutionPreference,
const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
- const HidlToken&,
+ const CacheToken&,
const sp<IPreparedModelCallback>& cb) override {
ErrorStatus status = ErrorStatus::NONE;
if (mOEM != OEMYes) {
@@ -381,7 +380,7 @@ class PartitioningDriver : public SampleDriver {
}
Return<ErrorStatus> prepareModelFromCache(
- const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const HidlToken&,
+ const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) override {
callback->notify_1_2(ErrorStatus::NONE, new PartitioningPreparedModel);
return ErrorStatus::NONE;
diff --git a/nn/runtime/test/TestPartitioningRandom.cpp b/nn/runtime/test/TestPartitioningRandom.cpp
index e591f1697..faf28d782 100644
--- a/nn/runtime/test/TestPartitioningRandom.cpp
+++ b/nn/runtime/test/TestPartitioningRandom.cpp
@@ -97,7 +97,6 @@ using DeviceManager = nn::DeviceManager;
using ExecutionPlan = nn::ExecutionPlan;
using HalVersion = nn::HalVersion;
using HidlModel = V1_2::Model;
-using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
using ModelBuilder = nn::ModelBuilder;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
@@ -553,7 +552,7 @@ class TestDriver : public SampleDriver {
Return<ErrorStatus> prepareModel_1_2(const HidlModel& model, ExecutionPreference preference,
const hidl_vec<hidl_handle>& modelCache,
const hidl_vec<hidl_handle>& dataCache,
- const HidlToken& token,
+ const CacheToken& token,
const sp<IPreparedModelCallback>& callback) override {
// NOTE: We verify that all operations in the model are supported.
ErrorStatus outStatus = ErrorStatus::INVALID_ARGUMENT;