author     Michael Butler <butlermichael@google.com>  2019-03-07 19:07:15 -0800
committer  Michael Butler <butlermichael@google.com>  2019-04-24 23:51:33 +0000
commit     94b5e65135deecf161c7a7930477f01bc6faf258 (patch)
tree       522b21a393cbac0a74e05feaa6c56fe3865ae951 /nn/runtime/Manager.cpp
parent     b747ed1be6106aa00b1efd09408d00cea782c32b (diff)
download   ml-94b5e65135deecf161c7a7930477f01bc6faf258.tar.gz
Cleanup VersionedIPreparedModel and PreparedModelCallback
This CL changes VersionedIPreparedModel to be synchronous. It returns the
results by value instead of requiring a callback be passed in. Additionally,
the IPreparedModel is wrapped in VersionedIPreparedModel before being
returned.

This CL additionally simplifies PreparedModelCallback by (1) decoupling it
from the base, (2) removing functions that are not used.

Bug: 118624080
Test: mma
Test: atest NeuralNetworksTest_static
Test: atest VtsHalNeuralnetworksV1_0TargetTest (with sample-all)
Test: atest VtsHalNeuralnetworksV1_1TargetTest (with sample-all)
Test: atest VtsHalNeuralnetworksV1_2TargetTest (with sample-all)
Change-Id: Ic7587e0680ca7ccdc6cf429d9537396540ab3c0d
(cherry picked from commit b2584cdb9b275c9c3e876e0c895b09937e066a6e)
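For readers unfamiliar with the callback-to-synchronous change described above, the following is a minimal, self-contained C++17 sketch of the API shape before and after. All names here (Status, FakePreparedModel, DeviceBefore, DeviceAfter) are illustrative placeholders, not the real HIDL/NNAPI types from this repository.

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <utility>

    enum class Status { NONE, GENERAL_FAILURE };

    struct FakePreparedModel {};

    // Old style: the caller hands in a callback and the result arrives later.
    struct DeviceBefore {
        void prepareModel(
                const std::function<void(Status, std::shared_ptr<FakePreparedModel>)>& callback) {
            // A real driver would notify the callback asynchronously once
            // compilation finishes; this sketch just invokes it inline.
            callback(Status::NONE, std::make_shared<FakePreparedModel>());
        }
    };

    // New style (what this CL moves toward): the call blocks and returns the
    // status and prepared model by value.
    struct DeviceAfter {
        std::pair<Status, std::shared_ptr<FakePreparedModel>> prepareModel() {
            return {Status::NONE, std::make_shared<FakePreparedModel>()};
        }
    };

    int main() {
        DeviceAfter device;
        // Structured bindings give the same calling shape as the new
        // DriverDevice::prepareModel in the diff below.
        const auto [status, preparedModel] = device.prepareModel();
        std::cout << (status == Status::NONE && preparedModel != nullptr ? "prepared\n"
                                                                         : "failed\n");
    }
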
Diffstat (limited to 'nn/runtime/Manager.cpp')
-rw-r--r--   nn/runtime/Manager.cpp   67
1 file changed, 23 insertions(+), 44 deletions(-)
diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp
index fadecc275..6c0874ed9 100644
--- a/nn/runtime/Manager.cpp
+++ b/nn/runtime/Manager.cpp
@@ -31,7 +31,6 @@
#include <functional>
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
namespace android {
@@ -81,12 +80,6 @@ class DriverDevice : public Device {
std::shared_ptr<VersionedIPreparedModel>* preparedModel) override;
private:
- int prepareModelHelper(
- const std::function<Return<ErrorStatus>(const sp<PreparedModelCallback>& callback)>&
- prepare,
- const std::string& prepareName,
- std::shared_ptr<VersionedIPreparedModel>* preparedModel);
-
std::string mName;
std::string mVersionString;
const std::shared_ptr<VersionedIDevice> mInterface;
@@ -232,37 +225,24 @@ PerformanceInfo DriverDevice::getPerformance(OperandType type) const {
return lookup(mCapabilities.operandPerformance, type);
}
-// Compilation logic copied from StepExecutor::startComputeOnDevice().
-int DriverDevice::prepareModelHelper(
- const std::function<Return<ErrorStatus>(const sp<PreparedModelCallback>& callback)>&
- prepare,
- const std::string& prepareName, std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
- *preparedModel = nullptr;
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+static int prepareModelCheck(ErrorStatus status,
+ const std::shared_ptr<VersionedIPreparedModel>& preparedModel,
+ const char* prepareName, const char* serviceName,
+ std::shared_ptr<VersionedIPreparedModel>* preparedModelOut) {
+ CHECK(preparedModelOut != nullptr) << "prepareModelCheck -- preparedModelOut must be non-null";
+ *preparedModelOut = nullptr;
- Return<ErrorStatus> prepareLaunchStatus = prepare(preparedModelCallback);
- if (!prepareLaunchStatus.isOk()) {
- LOG(ERROR) << prepareName << " compilation failed due to transport error: "
- << prepareLaunchStatus.description();
+ if (status != ErrorStatus::NONE) {
+ LOG(ERROR) << prepareName << " on " << serviceName << " failed: "
+ << "prepareReturnStatus=" << toString(status);
return ANEURALNETWORKS_OP_FAILED;
}
- if (prepareLaunchStatus != ErrorStatus::NONE) {
- LOG(ERROR) << prepareName << " compilation failed with error: "
- << toString(static_cast<ErrorStatus>(prepareLaunchStatus));
+ if (preparedModel == nullptr) {
+ LOG(ERROR) << prepareName << " on " << serviceName << " failed: preparedModel is nullptr";
return ANEURALNETWORKS_OP_FAILED;
}
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- if (auto returnedPreparedModel = preparedModelCallback->getPreparedModel()) {
- *preparedModel = VersionedIPreparedModel::create(returnedPreparedModel);
- }
- if (prepareReturnStatus != ErrorStatus::NONE || *preparedModel == nullptr) {
- LOG(ERROR) << prepareName << " on " << getName() << " failed:"
- << " prepareReturnStatus=" << toString(prepareReturnStatus)
- << ", preparedModel=" << preparedModel->get();
- return ANEURALNETWORKS_OP_FAILED;
- }
+ *preparedModelOut = preparedModel;
return ANEURALNETWORKS_NO_ERROR;
}
@@ -272,13 +252,11 @@ int DriverDevice::prepareModel(const Model& hidlModel, ExecutionPreference execu
std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
// Note that some work within VersionedIDevice will be subtracted from the IPC layer
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModel");
- return prepareModelHelper(
- [this, &hidlModel, &executionPreference, &modelCache, &dataCache,
- &token](const sp<PreparedModelCallback>& callback) {
- return mInterface->prepareModel(hidlModel, executionPreference, modelCache,
- dataCache, token, callback);
- },
- "prepareModel", preparedModel);
+
+ const auto [status, localPreparedModel] =
+ mInterface->prepareModel(hidlModel, executionPreference, modelCache, dataCache, token);
+
+ return prepareModelCheck(status, localPreparedModel, "prepareModel", getName(), preparedModel);
}
int DriverDevice::prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
@@ -287,11 +265,12 @@ int DriverDevice::prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
// Note that some work within VersionedIDevice will be subtracted from the IPC layer
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModelFromCache");
- return prepareModelHelper(
- [this, &modelCache, &dataCache, &token](const sp<PreparedModelCallback>& callback) {
- return mInterface->prepareModelFromCache(modelCache, dataCache, token, callback);
- },
- "prepareModelFromCache", preparedModel);
+
+ const auto [status, localPreparedModel] =
+ mInterface->prepareModelFromCache(modelCache, dataCache, token);
+
+ return prepareModelCheck(status, localPreparedModel, "prepareModelFromCache", getName(),
+ preparedModel);
}
// A special abstracted device for the CPU. Only one instance of this class will exist.
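The validation that previously lived inside prepareModelHelper is factored into the free function prepareModelCheck shown in the hunks above, so prepareModel and prepareModelFromCache share one error path. The sketch below re-creates that pattern in standalone C++17 with simplified stand-in types; ErrorStatus, PreparedModel, checkPrepareResult, and the k* return codes are placeholders for the real HAL and NeuralNetworks.h definitions, not the actual runtime code.

    #include <iostream>
    #include <memory>
    #include <utility>

    enum class ErrorStatus { NONE, GENERAL_FAILURE };

    constexpr int kNoError = 0;   // stand-in for ANEURALNETWORKS_NO_ERROR
    constexpr int kOpFailed = 1;  // stand-in for ANEURALNETWORKS_OP_FAILED

    struct PreparedModel {};

    // Shared validation of the (status, preparedModel) pair returned by a
    // synchronous prepare call; mirrors the shape of prepareModelCheck above.
    static int checkPrepareResult(ErrorStatus status,
                                  const std::shared_ptr<PreparedModel>& preparedModel,
                                  const char* prepareName, const char* serviceName,
                                  std::shared_ptr<PreparedModel>* preparedModelOut) {
        *preparedModelOut = nullptr;
        if (status != ErrorStatus::NONE) {
            std::cerr << prepareName << " on " << serviceName << " failed: bad status\n";
            return kOpFailed;
        }
        if (preparedModel == nullptr) {
            std::cerr << prepareName << " on " << serviceName
                      << " failed: preparedModel is nullptr\n";
            return kOpFailed;
        }
        *preparedModelOut = preparedModel;
        return kNoError;
    }

    int main() {
        // Simulate a successful synchronous prepare call returning by value.
        const auto [status, model] =
                std::make_pair(ErrorStatus::NONE, std::make_shared<PreparedModel>());
        std::shared_ptr<PreparedModel> out;
        const int result =
                checkPrepareResult(status, model, "prepareModel", "example-driver", &out);
        std::cout << "result=" << result << " haveModel=" << (out != nullptr) << '\n';
    }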