author    David Gross <dgross@google.com>  2017-11-30 18:04:07 -0800
committer David Gross <dgross@google.com>  2017-12-01 15:22:32 -0800
commit    4cb4fa2fe863a8f404c0448da160dd5bb4187db4 (patch)
tree      4f02ef92dcb62d0e2d0169e2b1b2346040a6368d
parent    3c33964e6cb0445c1dddbcec757342eefe8852fc (diff)
"Canonicalize" class RunTimePoolInfo.
This class manages an important resource (an mmap() memory region). Rather than treating the class as a bundle of state with methods to explicitly manipulate that resource, give it the full C++ concrete data type treatment: constructors, destructor, and move assignment, with all data private. Now the lifetime of the managed resource matches the lifetime of the class instance that manages it: the resource is freed when the instance is destroyed or assigned to.

Test: nn/runtime/test
Bug: 69685100
Bug: 69929024
Change-Id: Ib53a4c70d8db496d69a584d501f0aa6f74ddea99
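For context, the ownership pattern the commit applies is the usual move-only RAII shape. The following is a simplified, hypothetical sketch (MappedRegion is illustrative only; the real RunTimePoolInfo also tracks hidl_memory and sp<IMemory>, as the diff below shows):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    class MappedRegion {
    public:
        // Acquire: mmap an anonymous read/write region. On failure, getBuffer()
        // stays nullptr, mirroring the "getBuffer() == nullptr IFF construction
        // fails" contract documented in CpuExecutor.h below.
        explicit MappedRegion(size_t size) : mSize(size) {
            void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            mBuffer = (p == MAP_FAILED) ? nullptr : static_cast<uint8_t*>(p);
        }

        // Move construction/assignment transfer ownership; the source is left
        // owning nothing, so its destructor becomes a no-op.
        MappedRegion(MappedRegion&& other) noexcept
            : mBuffer(other.mBuffer), mSize(other.mSize) {
            other.mBuffer = nullptr;
        }
        MappedRegion& operator=(MappedRegion&& other) noexcept {
            if (this != &other) {
                release();               // free the currently held mapping first
                mBuffer = other.mBuffer;
                mSize = other.mSize;
                other.mBuffer = nullptr;
            }
            return *this;
        }

        // Copying is forbidden: two owners would munmap() the same region twice.
        MappedRegion(const MappedRegion&) = delete;
        MappedRegion& operator=(const MappedRegion&) = delete;

        ~MappedRegion() { release(); }

        uint8_t* getBuffer() const { return mBuffer; }

    private:
        void release() {
            if (mBuffer != nullptr) {
                munmap(mBuffer, mSize);
                mBuffer = nullptr;
            }
        }

        uint8_t* mBuffer = nullptr;
        size_t mSize = 0;
    };

With this shape, a std::vector of such objects cleans up every mapping when it is cleared or destroyed, which is why the reworked setRunTimePoolInfosFromHidlMemories() and the sample driver below can drop their explicit release calls.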
-rw-r--r--  nn/common/CpuExecutor.cpp          114
-rw-r--r--  nn/common/include/CpuExecutor.h     39
-rw-r--r--  nn/driver/sample/SampleDriver.cpp    5
-rw-r--r--  nn/driver/sample/SampleDriver.h      2
-rw-r--r--  nn/runtime/ExecutionBuilder.cpp     24
-rw-r--r--  nn/runtime/Memory.h                 15
6 files changed, 116 insertions, 83 deletions
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index ce6efd6eb..baa128af3 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -28,26 +28,25 @@ namespace nn {
// TODO: short term, make share memory mapping and updating a utility function.
// TODO: long term, implement mmap_fd as a hidl IMemory service.
-bool RunTimePoolInfo::set(const hidl_memory& hidlMemory) {
- if (buffer != nullptr) {
- release();
- }
+RunTimePoolInfo::RunTimePoolInfo(const hidl_memory& hidlMemory, bool* fail) {
+ sp<IMemory> memory;
+ uint8_t* buffer = nullptr;
- this->hidlMemory = hidlMemory;
auto memType = hidlMemory.name();
if (memType == "ashmem") {
memory = mapMemory(hidlMemory);
if (memory == nullptr) {
LOG(ERROR) << "Can't map shared memory.";
- return false;
+ if (fail) *fail = true;
+ return;
}
memory->update();
buffer = reinterpret_cast<uint8_t*>(static_cast<void*>(memory->getPointer()));
if (buffer == nullptr) {
LOG(ERROR) << "Can't access shared memory.";
- return false;
+ if (fail) *fail = true;
+ return;
}
- return true;
} else if (memType == "mmap_fd") {
size_t size = hidlMemory.size();
int fd = hidlMemory.handle()->data[0];
@@ -57,27 +56,55 @@ bool RunTimePoolInfo::set(const hidl_memory& hidlMemory) {
buffer = static_cast<uint8_t*>(mmap(nullptr, size, prot, MAP_SHARED, fd, offset));
if (buffer == MAP_FAILED) {
LOG(ERROR) << "RunTimePoolInfo::set(): Can't mmap the file descriptor.";
- return false;
+ if (fail) *fail = true;
+ return;
}
- return true;
} else {
LOG(ERROR) << "RunTimePoolInfo::set(): unsupported hidl_memory type";
- return false;
+ if (fail) *fail = true;
+ return;
+ }
+
+ mHidlMemory = hidlMemory;
+ mBuffer = buffer;
+ mMemory = memory;
+}
+
+RunTimePoolInfo::RunTimePoolInfo(uint8_t* buffer) {
+ mBuffer = buffer;
+}
+
+RunTimePoolInfo::RunTimePoolInfo(RunTimePoolInfo&& other) {
+ moveFrom(std::move(other));
+ other.mBuffer = nullptr;
+}
+
+RunTimePoolInfo& RunTimePoolInfo::operator=(RunTimePoolInfo&& other) {
+ if (this != &other) {
+ release();
+ moveFrom(std::move(other));
+ other.mBuffer = nullptr;
}
+ return *this;
+}
+
+void RunTimePoolInfo::moveFrom(RunTimePoolInfo &&other) {
+ mHidlMemory = std::move(other.mHidlMemory);
+ mBuffer = std::move(other.mBuffer);
+ mMemory = std::move(other.mMemory);
}
void RunTimePoolInfo::release() {
- if (buffer == nullptr) {
- // Never initialized, or already released.
- LOG(ERROR) << "RunTimePoolInfo::release(): incorrect RunTimePoolInfo resource management";
+ if (mBuffer == nullptr) {
+ return;
}
- auto memType = hidlMemory.name();
+ auto memType = mHidlMemory.name();
if (memType == "ashmem") {
// nothing to do
} else if (memType == "mmap_fd") {
- size_t size = hidlMemory.size();
- if (munmap(buffer, size)) {
+ size_t size = mHidlMemory.size();
+ if (munmap(mBuffer, size)) {
LOG(ERROR) << "RunTimePoolInfo::release(): Can't munmap";
}
} else if (memType == "") {
@@ -85,22 +112,23 @@ void RunTimePoolInfo::release() {
} else {
LOG(ERROR) << "RunTimePoolInfo::release(): unsupported hidl_memory type";
}
- memory = nullptr;
- hidlMemory = hidl_memory();
- buffer = nullptr;
+
+ mHidlMemory = hidl_memory();
+ mMemory = nullptr;
+ mBuffer = nullptr;
}
// Making sure the output data are correctly updated after execution.
-bool RunTimePoolInfo::update() {
- auto memType = hidlMemory.name();
+bool RunTimePoolInfo::update() const {
+ auto memType = mHidlMemory.name();
if (memType == "ashmem") {
- memory->commit();
+ mMemory->commit();
return true;
} else if (memType == "mmap_fd") {
- int prot = hidlMemory.handle()->data[1];
+ int prot = mHidlMemory.handle()->data[1];
if (prot & PROT_WRITE) {
- size_t size = hidlMemory.size();
- return msync(buffer, size, MS_SYNC) == 0;
+ size_t size = mHidlMemory.size();
+ return msync(mBuffer, size, MS_SYNC) == 0;
}
}
// No-op for other types of memory.
@@ -109,24 +137,18 @@ bool RunTimePoolInfo::update() {
bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
const hidl_vec<hidl_memory>& pools) {
- poolInfos->resize(pools.size());
- for (size_t i = 0; i < pools.size(); i++) {
- auto& poolInfo = (*poolInfos)[i];
- if (!poolInfo.set(pools[i])) {
- LOG(ERROR) << "Could not map pool";
- for (size_t j = 0; j < i; j++) {
- (*poolInfos)[j].release();
- }
- return false;
- }
+ poolInfos->clear();
+ poolInfos->reserve(pools.size());
+ bool fail = false;
+ for (const auto& pool : pools) {
+ poolInfos->emplace_back(pool, &fail);
}
- return true;
-}
-
-void releaseRunTimePoolInfos(std::vector<RunTimePoolInfo>* poolInfos) {
- for (auto& info : *poolInfos) {
- info.release();
+ if (fail) {
+ LOG(ERROR) << "Could not map pools";
+ poolInfos->clear();
+ return false;
}
+ return true;
}
// Updates the RunTimeOperandInfo with the newly calculated shape.
@@ -179,10 +201,10 @@ int CpuExecutor::run(const Model& model, const Request& request,
return n;
}
}
- for (auto runtimeInfo : modelPoolInfos) {
+ for (auto& runtimeInfo : modelPoolInfos) {
runtimeInfo.update();
}
- for (auto runtimeInfo : requestPoolInfos) {
+ for (auto& runtimeInfo : requestPoolInfos) {
runtimeInfo.update();
}
mModel = nullptr;
@@ -220,7 +242,7 @@ bool CpuExecutor::initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& mode
auto poolIndex = from.location.poolIndex;
nnAssert(poolIndex < modelPoolInfos.size());
auto& r = modelPoolInfos[poolIndex];
- to.buffer = r.buffer + from.location.offset;
+ to.buffer = r.getBuffer() + from.location.offset;
to.numberOfUsesLeft = 0;
break;
}
@@ -260,7 +282,7 @@ bool CpuExecutor::initializeRunTimeInfo(const std::vector<RunTimePoolInfo>& mode
auto poolIndex = from.location.poolIndex;
nnAssert(poolIndex < requestPoolInfos.size());
auto& r = requestPoolInfos[poolIndex];
- to.buffer = r.buffer + from.location.offset;
+ to.buffer = r.getBuffer() + from.location.offset;
}
}
};
diff --git a/nn/common/include/CpuExecutor.h b/nn/common/include/CpuExecutor.h
index 1ddcfe1e6..23a25a9fe 100644
--- a/nn/common/include/CpuExecutor.h
+++ b/nn/common/include/CpuExecutor.h
@@ -60,24 +60,45 @@ struct RunTimeOperandInfo {
};
// Used to keep a pointer to each of the memory pools.
-struct RunTimePoolInfo {
- sp<IMemory> memory;
- hidl_memory hidlMemory;
- uint8_t* buffer = nullptr;
+//
+// In the case of an "mmap_fd" pool, owns the mmap region
+// returned by getBuffer() -- i.e., that region goes away
+// when the RunTimePoolInfo is destroyed or is assigned to.
+class RunTimePoolInfo {
+public:
+ // If "fail" is not nullptr, and construction fails, then set *fail = true.
+ // If construction succeeds, leave *fail unchanged.
+ // getBuffer() == nullptr IFF construction fails.
+ explicit RunTimePoolInfo(const hidl_memory& hidlMemory, bool* fail);
+
+ explicit RunTimePoolInfo(uint8_t* buffer);
+
+ // Implement move
+ RunTimePoolInfo(RunTimePoolInfo&& other);
+ RunTimePoolInfo& operator=(RunTimePoolInfo&& other);
+
+ // Forbid copy
+ RunTimePoolInfo(const RunTimePoolInfo&) = delete;
+ RunTimePoolInfo& operator=(const RunTimePoolInfo&) = delete;
- bool set(const hidl_memory& hidlMemory);
+ ~RunTimePoolInfo() { release(); }
- // Release resources owned by this info (acquired by set()).
+ uint8_t* getBuffer() const { return mBuffer; }
+
+ bool update() const;
+
+private:
void release();
+ void moveFrom(RunTimePoolInfo&& other);
- bool update();
+ hidl_memory mHidlMemory; // always used
+ uint8_t* mBuffer = nullptr; // always used
+ sp<IMemory> mMemory; // only used when hidlMemory.name() == "ashmem"
};
bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
const hidl_vec<hidl_memory>& pools);
-void releaseRunTimePoolInfos(std::vector<RunTimePoolInfo>* poolInfos);
-
// This class is used to execute a model on the CPU.
class CpuExecutor {
public:
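Taking the constructor contract documented above at face value, caller code in the spirit of this change looks like the reworked setRunTimePoolInfosFromHidlMemories() earlier in the diff; a hedged sketch (error-reporting details illustrative, not part of the diff):

    bool fail = false;
    std::vector<RunTimePoolInfo> poolInfos;
    poolInfos.reserve(pools.size());
    for (const hidl_memory& pool : pools) {
        poolInfos.emplace_back(pool, &fail);   // sets *fail if mapping the pool fails
    }
    if (fail) {
        poolInfos.clear();   // destructors unmap whatever was already mapped
        LOG(ERROR) << "Could not map pools";
    }
    // No releaseRunTimePoolInfos() needed: each element frees its region when
    // the vector is destroyed or cleared.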
diff --git a/nn/driver/sample/SampleDriver.cpp b/nn/driver/sample/SampleDriver.cpp
index b686cfcb5..664607fd4 100644
--- a/nn/driver/sample/SampleDriver.cpp
+++ b/nn/driver/sample/SampleDriver.cpp
@@ -86,7 +86,6 @@ void SamplePreparedModel::asyncExecute(const Request& request,
CpuExecutor executor;
int n = executor.run(mModel, request, mPoolInfos, requestPoolInfos);
VLOG(DRIVER) << "executor.run returned " << n;
- releaseRunTimePoolInfos(&requestPoolInfos);
ErrorStatus executionStatus =
n == ANEURALNETWORKS_NO_ERROR ? ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE;
Return<void> returned = callback->notify(executionStatus);
@@ -114,10 +113,6 @@ Return<ErrorStatus> SamplePreparedModel::execute(const Request& request,
return ErrorStatus::NONE;
}
-SamplePreparedModel::~SamplePreparedModel() {
- releaseRunTimePoolInfos(&mPoolInfos);
-}
-
} // namespace sample_driver
} // namespace nn
} // namespace android
diff --git a/nn/driver/sample/SampleDriver.h b/nn/driver/sample/SampleDriver.h
index 3b2e97356..7e95c952b 100644
--- a/nn/driver/sample/SampleDriver.h
+++ b/nn/driver/sample/SampleDriver.h
@@ -52,7 +52,7 @@ public:
SamplePreparedModel(const Model& model)
: // Make a copy of the model, as we need to preserve it.
mModel(model) {}
- ~SamplePreparedModel() override;
+ ~SamplePreparedModel() override {}
bool initialize();
Return<ErrorStatus> execute(const Request& request,
const sp<IExecutionCallback>& callback) override;
diff --git a/nn/runtime/ExecutionBuilder.cpp b/nn/runtime/ExecutionBuilder.cpp
index 9d041c256..0d2194a5b 100644
--- a/nn/runtime/ExecutionBuilder.cpp
+++ b/nn/runtime/ExecutionBuilder.cpp
@@ -637,8 +637,6 @@ static void asyncStartComputeOnCpu(const Model& model, const Request& request,
const sp<IExecutionCallback>& executionCallback) {
CpuExecutor executor;
int err = executor.run(model, request, modelPoolInfos, requestPoolInfos);
- releaseRunTimePoolInfos(const_cast<std::vector<RunTimePoolInfo>*>(&modelPoolInfos));
- releaseRunTimePoolInfos(const_cast<std::vector<RunTimePoolInfo>*>(&requestPoolInfos));
ErrorStatus status = err == ANEURALNETWORKS_NO_ERROR ?
ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE;
executionCallback->notify(status);
@@ -663,28 +661,22 @@ int StepExecutor::startComputeOnCpu(sp<ExecutionCallback>* synchronizationCallba
}
std::vector<RunTimePoolInfo> requestPoolInfos;
- uint32_t count = mMemories.size();
- requestPoolInfos.resize(count);
- for (uint32_t i = 0; i < count; i++) {
- const Memory* mem = mMemories[i];
- if (!requestPoolInfos[i].set(mem->getHidlMemory())) {
- for (uint32_t j = 0; j < i; j++) {
- requestPoolInfos[j].release();
- }
- releaseRunTimePoolInfos(&modelPoolInfos);
- return ANEURALNETWORKS_UNMAPPABLE;
- }
+ requestPoolInfos.reserve(mMemories.size());
+ bool fail = false;
+ for (const Memory* mem : mMemories) {
+ requestPoolInfos.emplace_back(mem->getHidlMemory(), &fail);
+ }
+ if (fail) {
+ return ANEURALNETWORKS_UNMAPPABLE;
}
// Create as many pools as there are input / output.
auto fixPointerArguments = [&requestPoolInfos](std::vector<ModelArgumentInfo>& argumentInfos) {
for (ModelArgumentInfo& argumentInfo : argumentInfos) {
if (argumentInfo.state == ModelArgumentInfo::POINTER) {
- RunTimePoolInfo runTimeInfo = {
- .buffer = static_cast<uint8_t*>(argumentInfo.buffer)};
argumentInfo.locationAndLength.poolIndex =
static_cast<uint32_t>(requestPoolInfos.size());
argumentInfo.locationAndLength.offset = 0;
- requestPoolInfos.push_back(runTimeInfo);
+ requestPoolInfos.emplace_back(static_cast<uint8_t*>(argumentInfo.buffer));
}
}
};
diff --git a/nn/runtime/Memory.h b/nn/runtime/Memory.h
index 9dc92ccaa..15b56e57a 100644
--- a/nn/runtime/Memory.h
+++ b/nn/runtime/Memory.h
@@ -89,6 +89,12 @@ private:
// The user of this class is responsible for avoiding concurrent calls
// to this class from multiple threads.
class MemoryTracker {
+private:
+ // The vector of Memory pointers we are building.
+ std::vector<const Memory*> mMemories;
+ // A faster way to see if we already have a memory than doing find().
+ std::unordered_map<const Memory*, uint32_t> mKnown;
+
public:
// Adds the memory, if it does not already exists. Returns its index.
// The memories should survive the tracker.
@@ -97,12 +103,9 @@ public:
uint32_t size() const { return static_cast<uint32_t>(mKnown.size()); }
// Returns the ith memory.
const Memory* operator[](size_t i) const { return mMemories[i]; }
-
-private:
- // The vector of Memory pointers we are building.
- std::vector<const Memory*> mMemories;
- // A faster way to see if we already have a memory than doing find().
- std::unordered_map<const Memory*, uint32_t> mKnown;
+ // Iteration
+ decltype(mMemories.begin()) begin() { return mMemories.begin(); }
+ decltype(mMemories.end()) end() { return mMemories.end(); }
};
} // namespace nn
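One note on the MemoryTracker change: the private members were hoisted above the public section apparently so that the new accessors can spell their return types as decltype(mMemories.begin()). The added begin()/end() let callers such as StepExecutor::startComputeOnCpu() iterate the tracked memories directly, as in this sketch (the tracker variable and loop body are illustrative; the equivalent loop appears in the ExecutionBuilder.cpp hunk above):

    MemoryTracker tracker;
    // ... Memory objects registered via tracker.add(...) ...
    for (const Memory* mem : tracker) {    // uses the new begin()/end()
        requestPoolInfos.emplace_back(mem->getHidlMemory(), &fail);
    }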