path: root/nn/runtime/test/TestGenerated.cpp
author     Xusong Wang <xusongw@google.com>    2020-03-04 14:08:59 -0800
committer  Xusong Wang <xusongw@google.com>    2020-03-09 16:09:21 -0700
commit     e968bb1055e582571a0a9bdccc12b6ba7bdaefce (patch)
tree       524ac770e46177e02e153b4007f35bdce1fed635 /nn/runtime/test/TestGenerated.cpp
parent     6e4afba20cd62dade0dd178b638e406cc3bc13dc (diff)
download   ml-e968bb1055e582571a0a9bdccc12b6ba7bdaefce.tar.gz
Separate generated test utilities and the tests.
This enables a test target to use the generated test utilities without including all the generated tests.

Bug: 150805665
Test: NNT_static
Change-Id: I58fbf949a03f8f0199b0893da41fc90b5b5c004e
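For context, a minimal sketch of the kind of reuse this split makes possible follows. The consumer function is hypothetical; only the headers and the createModel() signature visible in the removed code further down are taken from this change, and the namespace/using setup of GeneratedTestUtils.h is assumed.

#include "GeneratedTestUtils.h"          // now provides createModel() (moved by this change)
#include "TestHarness.h"                 // test_helper::TestModel
#include "TestNeuralNetworksWrapper.h"   // Model, Compilation, Result wrappers

// Hypothetical consumer target: build and compile a generated TestModel
// without linking every generated test. Namespace qualification omitted.
static bool compileTestModel(const test_helper::TestModel& testModel) {
    GeneratedModel model;
    createModel(testModel, /*testDynamicOutputShape=*/false, &model);
    if (model.finish() != Result::NO_ERROR || !model.isValid()) {
        return false;
    }
    Compilation compilation(&model);
    return compilation.finish() == Result::NO_ERROR;
}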
Diffstat (limited to 'nn/runtime/test/TestGenerated.cpp')
-rw-r--r--    nn/runtime/test/TestGenerated.cpp    107
1 file changed, 1 insertion, 106 deletions
diff --git a/nn/runtime/test/TestGenerated.cpp b/nn/runtime/test/TestGenerated.cpp
index bcb3193c6..952f747b0 100644
--- a/nn/runtime/test/TestGenerated.cpp
+++ b/nn/runtime/test/TestGenerated.cpp
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "TestGenerated.h"
-
#include <android-base/logging.h>
#include <android-base/mapped_file.h>
#include <android-base/unique_fd.h>
@@ -36,6 +34,7 @@
#include <utility>
#include <vector>
+#include "GeneratedTestUtils.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
@@ -105,110 +104,6 @@ class DeviceMemoryTest : public GeneratedTests {
DeviceMemoryTest() { mTestDeviceMemory = true; }
};
-static OperandType getOperandType(const TestOperand& op, bool testDynamicOutputShape) {
- auto dims = op.dimensions;
- if (testDynamicOutputShape && op.lifetime == TestOperandLifeTime::SUBGRAPH_OUTPUT) {
- dims.assign(dims.size(), 0);
- }
- if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
- return OperandType(
- static_cast<Type>(op.type), dims,
- SymmPerChannelQuantParams(op.channelQuant.scales, op.channelQuant.channelDim));
- } else {
- return OperandType(static_cast<Type>(op.type), dims, op.scale, op.zeroPoint);
- }
-}
-
-static void createModelFromSubgraph(const TestSubgraph& subgraph, bool testDynamicOutputShape,
- const std::vector<TestSubgraph>& refSubgraphs, Model* model,
- Model* refModels) {
- // Operands.
- for (const auto& operand : subgraph.operands) {
- auto type = getOperandType(operand, testDynamicOutputShape);
- auto index = model->addOperand(&type);
-
- switch (operand.lifetime) {
- case TestOperandLifeTime::CONSTANT_COPY:
- case TestOperandLifeTime::CONSTANT_REFERENCE: {
- model->setOperandValue(index, operand.data.get<void>(), operand.data.size());
- } break;
- case TestOperandLifeTime::NO_VALUE: {
- model->setOperandValue(index, nullptr, 0);
- } break;
- case TestOperandLifeTime::SUBGRAPH: {
- uint32_t refIndex = *operand.data.get<uint32_t>();
- CHECK_LT(refIndex, refSubgraphs.size());
- const TestSubgraph& refSubgraph = refSubgraphs[refIndex];
- Model* refModel = &refModels[refIndex];
- if (!refModel->isFinished()) {
- createModelFromSubgraph(refSubgraph, testDynamicOutputShape, refSubgraphs,
- refModel, refModels);
- ASSERT_EQ(refModel->finish(), Result::NO_ERROR);
- ASSERT_TRUE(refModel->isValid());
- }
- model->setOperandValueFromModel(index, refModel);
- } break;
- case TestOperandLifeTime::SUBGRAPH_INPUT:
- case TestOperandLifeTime::SUBGRAPH_OUTPUT:
- case TestOperandLifeTime::TEMPORARY_VARIABLE: {
- // Nothing to do here.
- } break;
- }
- }
-
- // Operations.
- for (const auto& operation : subgraph.operations) {
- model->addOperation(static_cast<int>(operation.type), operation.inputs, operation.outputs);
- }
-
- // Inputs and outputs.
- model->identifyInputsAndOutputs(subgraph.inputIndexes, subgraph.outputIndexes);
-}
-
-void createModel(const TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model) {
- ASSERT_NE(nullptr, model);
-
- std::vector<Model> refModels(testModel.referenced.size());
- createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, model,
- refModels.data());
- model->setRefModels(std::move(refModels));
-
- // Relaxed computation.
- model->relaxComputationFloat32toFloat16(testModel.isRelaxed);
-
- ASSERT_TRUE(model->isValid());
-}
-
-static void createRequest(const TestModel& testModel, Execution* execution,
- std::vector<TestBuffer>* outputs) {
- ASSERT_NE(nullptr, execution);
- ASSERT_NE(nullptr, outputs);
-
- // Model inputs.
- for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
- const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
- ASSERT_EQ(Result::NO_ERROR,
- execution->setInput(i, operand.data.get<void>(), operand.data.size()));
- }
-
- // Model outputs.
- for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
- const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
-
- // In the case of zero-sized output, we should at least provide a one-byte buffer.
- // This is because zero-sized tensors are only supported internally to the runtime, or
- // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
- // tensor as model output. Otherwise, we will have two semantic conflicts:
- // - "Zero dimension" conflicts with "unspecified dimension".
- // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
- const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);
-
- outputs->emplace_back(bufferSize);
- ASSERT_EQ(Result::NO_ERROR,
- execution->setOutput(i, outputs->back().getMutable<void>(), bufferSize));
- }
-}
-
std::optional<Compilation> GeneratedTests::compileModel(const Model& model) {
NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
if (mTestCompilationCaching) {