summaryrefslogtreecommitdiff
path: root/nn/common
diff options
context:
space:
mode:
authorJim Pollock <jmpollock@google.com>2020-07-15 18:05:15 +0100
committerJim Pollock <jmpollock@google.com>2020-07-22 11:34:04 +0100
commit5b19c79ba1aed2a04a51d1933f744c6fde2c638f (patch)
tree354bd889443d6711f6b2bd65ff5b1b91f6828ea0 /nn/common
parent703db4a70bed26a177ae0d99e5a938387cdd9dce (diff)
downloadml-5b19c79ba1aed2a04a51d1933f744c6fde2c638f.tar.gz
nn: fix memory leak in CPUExecutor while op
If a while loop exits due to a timeout, the operands aren't cleaned up correctly. Cherry-picked from chromium: 706a25ef29307aa6ed0ac95334d2db17f438c1c4 https://crrev.com/c/2300539 BUG=b:161446175 Test: mma within frameworks/ml Change-Id: I411ae4db3d1bd242297f3700b1c9eed53076675e
Diffstat (limited to 'nn/common')
-rw-r--r--nn/common/CpuExecutor.cpp34
1 file changed, 21 insertions, 13 deletions
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index b66339b4c..db95b76cb 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -19,6 +19,8 @@
#include "CpuExecutor.h"
#include <android/hardware_buffer.h>
+#include <android-base/scopeguard.h>
+
#include <sys/mman.h>
#include <vndk/hardware_buffer.h>
@@ -1797,6 +1799,25 @@ int CpuExecutor::executeWhileOperation(const Operation& operation, RunTimeOperan
std::vector<uint8_t*> tmp1(bodySubgraph.outputIndexes.size());
std::vector<uint8_t*> tmp2(bodySubgraph.outputIndexes.size());
+ // Ensure objects are freed
+ auto cleanupGuard = base::make_scope_guard(
+ [&tmp1, &tmp2, &condOperands, &bodyOperands, &operation, &operands] {
+ auto freeLoopOutputs = [](const std::vector<uint8_t*>& tmp) {
+ for (auto buffer : tmp) {
+ if (buffer != nullptr) {
+ delete[] buffer;
+ }
+ }
+ };
+
+ freeLoopOutputs(tmp1);
+ freeLoopOutputs(tmp2);
+ freeUnusedSubgraphOperands(&condOperands);
+ freeUnusedSubgraphOperands(&bodyOperands);
+ consumeOperationInputs(operation.inputs, operands);
+ }
+ );
+
// For body outputs with unknown shape, we skip double buffering and
// allocate on each iteration instead. This allows growing output tensors
// inside a WHILE loop.
@@ -1883,19 +1904,6 @@ int CpuExecutor::executeWhileOperation(const Operation& operation, RunTimeOperan
std::memcpy(outerOperand.buffer, innerOperand.buffer, innerOperand.length);
}
- auto freeLoopOutputs = [](const std::vector<uint8_t*>& tmp) {
- for (auto buffer : tmp) {
- if (buffer != nullptr) {
- delete[] buffer;
- }
- }
- };
- freeLoopOutputs(tmp1);
- freeLoopOutputs(tmp2);
- freeUnusedSubgraphOperands(&condOperands);
- freeUnusedSubgraphOperands(&bodyOperands);
- consumeOperationInputs(operation.inputs, operands);
-
return ANEURALNETWORKS_NO_ERROR;
}