summaryrefslogtreecommitdiff
path: root/nn/runtime/test/TestNeuralNetworksWrapper.h
diff options
context:
space:
mode:
authorMichael Butler <butlermichael@google.com>2020-01-31 18:35:07 -0800
committerMichael Butler <butlermichael@google.com>2020-02-03 18:44:42 -0800
commit2aa3ded127e8e44a2284bd8c999df296a5e2f25f (patch)
tree42a621b061e560a2312695a67f5f851da7101541 /nn/runtime/test/TestNeuralNetworksWrapper.h
parent69a5c5d91cdab229ff6f92f5c5f47e9168fc851d (diff)
downloadml-2aa3ded127e8e44a2284bd8c999df296a5e2f25f.tar.gz
NNAPI runtime: Remove priority from prepareModelFromCache
prepareModelFromCache_1_3 uses model cache and data cache that represent a prepared model. Any argument that contributes in a meaningful way to the prepared model is implicitly included in this model cache and data cache. For example, "model" and "executionPreference" appear in prepareModel_1_3 but not in prepareModelFromCache_1_3 because they are implicitly included in the model cache and data cache. In a similar way, because it could affect the resulting model, "priority" should be removed from prepareModelFromCache_1_3.

Fixes: 148802784
Test: mma
Test: CtsNNAPITestCases
Test: NeuralNetworksTest_static
Change-Id: I83735ef8c3a10d1dbd58235d44e2c0067413cc79
Diffstat (limited to 'nn/runtime/test/TestNeuralNetworksWrapper.h')
-rw-r--r-- nn/runtime/test/TestNeuralNetworksWrapper.h | 6
1 file changed, 6 insertions, 0 deletions
diff --git a/nn/runtime/test/TestNeuralNetworksWrapper.h b/nn/runtime/test/TestNeuralNetworksWrapper.h
index 4a677cebe..236746888 100644
--- a/nn/runtime/test/TestNeuralNetworksWrapper.h
+++ b/nn/runtime/test/TestNeuralNetworksWrapper.h
@@ -37,6 +37,7 @@ namespace test_wrapper {
using wrapper::Event;
using wrapper::ExecutePreference;
+using wrapper::ExecutePriority;
using wrapper::ExtensionModel;
using wrapper::ExtensionOperandParams;
using wrapper::ExtensionOperandType;
@@ -254,6 +255,11 @@ class Compilation {
mCompilation, static_cast<int32_t>(preference)));
}
+ Result setPriority(ExecutePriority priority) {
+ return static_cast<Result>(ANeuralNetworksCompilation_setPriority(
+ mCompilation, static_cast<int32_t>(priority)));
+ }
+
Result setCaching(const std::string& cacheDir, const std::vector<uint8_t>& token) {
if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN) {
return Result::BAD_DATA;