author     Slava Shklyaev <slavash@google.com>    2020-01-30 17:06:25 +0000
committer  Slava Shklyaev <slavash@google.com>    2020-02-14 15:45:46 +0000
commit     f018a4edc6c94abbf6c906002500f09bdbd89db6 (patch)
tree       38506ce2e6779ce9a128a1955fe7a48d503daae5 /nn/runtime/test/TestGenerated.cpp
parent     4d274101ef688b1dac086f5b7eb3acb2cbbc24d7 (diff)
download   ml-f018a4edc6c94abbf6c906002500f09bdbd89db6.tar.gz
Add control flow support to test harness
This change introduces a new TestModel structure that mirrors the HAL
changes in I7a75175f.

Bug: 148601177
Bug: 136735929
Test: NNT_static
Change-Id: I98a3edd11f9ef74abbe148a2dcf13e8a5f74bc06
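For orientation, a minimal sketch of the TestModel layout this change relies on, with field names taken from the diff below (the authoritative definitions live in the shared test harness headers, so treat this as an illustration only):

// Sketch only: a test model now wraps a "main" subgraph plus any subgraphs it
// references, instead of holding operands and operations directly.
// TestOperand and TestOperation are the existing harness types.
struct TestSubgraph {
    std::vector<TestOperand> operands;
    std::vector<TestOperation> operations;
    std::vector<uint32_t> inputIndexes;
    std::vector<uint32_t> outputIndexes;
};

struct TestModel {
    TestSubgraph main;                     // entry-point graph
    std::vector<TestSubgraph> referenced;  // graphs pointed to by SUBGRAPH operands
    bool isRelaxed = false;
    bool expectFailure = false;
    // further fields (e.g. hasQuant8CoupledOperands()) omitted
};

An operand with lifetime TestOperandLifeTime::SUBGRAPH stores an index into referenced; the diff below builds each referenced model on demand and attaches it with setOperandValueFromModel. A second sketch after the diff covers the GeneratedModel wrapper that keeps those referenced models alive.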
Diffstat (limited to 'nn/runtime/test/TestGenerated.cpp')
-rw-r--r--  nn/runtime/test/TestGenerated.cpp | 85
1 file changed, 55 insertions(+), 30 deletions(-)
diff --git a/nn/runtime/test/TestGenerated.cpp b/nn/runtime/test/TestGenerated.cpp
index 85c06209f..85a164dde 100644
--- a/nn/runtime/test/TestGenerated.cpp
+++ b/nn/runtime/test/TestGenerated.cpp
@@ -119,37 +119,59 @@ static OperandType getOperandType(const TestOperand& op, bool testDynamicOutputS
}
}
-void createModel(const TestModel& testModel, bool testDynamicOutputShape, Model* model) {
- ASSERT_NE(nullptr, model);
-
+static void createModelFromSubgraph(const TestSubgraph& subgraph, bool testDynamicOutputShape,
+ const std::vector<TestSubgraph>& refSubgraphs, Model* model,
+ Model* refModels) {
// Operands.
- for (const auto& operand : testModel.operands) {
+ for (const auto& operand : subgraph.operands) {
auto type = getOperandType(operand, testDynamicOutputShape);
auto index = model->addOperand(&type);
switch (operand.lifetime) {
case TestOperandLifeTime::CONSTANT_COPY:
- case TestOperandLifeTime::CONSTANT_REFERENCE:
+ case TestOperandLifeTime::CONSTANT_REFERENCE: {
model->setOperandValue(index, operand.data.get<void>(), operand.data.size());
- break;
- case TestOperandLifeTime::NO_VALUE:
+ } break;
+ case TestOperandLifeTime::NO_VALUE: {
model->setOperandValue(index, nullptr, 0);
- break;
+ } break;
+ case TestOperandLifeTime::SUBGRAPH: {
+ uint32_t refIndex = *operand.data.get<uint32_t>();
+ CHECK_LT(refIndex, refSubgraphs.size());
+ const TestSubgraph& refSubgraph = refSubgraphs[refIndex];
+ Model* refModel = &refModels[refIndex];
+ if (!refModel->isFinished()) {
+ createModelFromSubgraph(refSubgraph, testDynamicOutputShape, refSubgraphs,
+ refModel, refModels);
+ ASSERT_EQ(refModel->finish(), Result::NO_ERROR);
+ ASSERT_TRUE(refModel->isValid());
+ }
+ model->setOperandValueFromModel(index, refModel);
+ } break;
case TestOperandLifeTime::SUBGRAPH_INPUT:
case TestOperandLifeTime::SUBGRAPH_OUTPUT:
- case TestOperandLifeTime::TEMPORARY_VARIABLE:
+ case TestOperandLifeTime::TEMPORARY_VARIABLE: {
// Nothing to do here.
- break;
+ } break;
}
}
// Operations.
- for (const auto& operation : testModel.operations) {
+ for (const auto& operation : subgraph.operations) {
model->addOperation(static_cast<int>(operation.type), operation.inputs, operation.outputs);
}
// Inputs and outputs.
- model->identifyInputsAndOutputs(testModel.inputIndexes, testModel.outputIndexes);
+ model->identifyInputsAndOutputs(subgraph.inputIndexes, subgraph.outputIndexes);
+}
+
+void createModel(const TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model) {
+ ASSERT_NE(nullptr, model);
+
+ std::vector<Model> refModels(testModel.referenced.size());
+ createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, model,
+ refModels.data());
+ model->setRefModels(std::move(refModels));
// Relaxed computation.
model->relaxComputationFloat32toFloat16(testModel.isRelaxed);
@@ -163,15 +185,15 @@ static void createRequest(const TestModel& testModel, Execution* execution,
ASSERT_NE(nullptr, outputs);
// Model inputs.
- for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
- const auto& operand = testModel.operands[testModel.inputIndexes[i]];
+ for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
+ const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
ASSERT_EQ(Result::NO_ERROR,
execution->setInput(i, operand.data.get<void>(), operand.data.size()));
}
// Model outputs.
- for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
- const auto& operand = testModel.operands[testModel.outputIndexes[i]];
+ for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
+ const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
// In the case of zero-sized output, we should at least provide a one-byte buffer.
// This is because zero-sized tensors are only supported internally to the runtime, or
@@ -305,9 +327,9 @@ static void computeWithDeviceMemories(const Compilation& compilation, const Test
{
NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithDeviceMemories example");
// Model inputs.
- for (uint32_t i = 0; i < testModel.inputIndexes.size(); i++) {
+ for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
SCOPED_TRACE("Input index: " + std::to_string(i));
- const auto& operand = testModel.operands[testModel.inputIndexes[i]];
+ const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
// Omitted input.
if (operand.data.size() == 0) {
ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
@@ -328,7 +350,7 @@ static void computeWithDeviceMemories(const Compilation& compilation, const Test
}
// Model outputs.
- for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
+ for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
SCOPED_TRACE("Output index: " + std::to_string(i));
ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
ASSERT_NE(memory, nullptr);
@@ -340,9 +362,9 @@ static void computeWithDeviceMemories(const Compilation& compilation, const Test
*result = execution->compute();
// Copy out output results.
- for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
+ for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
SCOPED_TRACE("Output index: " + std::to_string(i));
- const auto& operand = testModel.operands[testModel.outputIndexes[i]];
+ const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
const size_t bufferSize = operand.data.size();
auto& output = outputs->emplace_back(bufferSize);
@@ -383,8 +405,8 @@ void GeneratedTests::executeWithCompilation(const Compilation& compilation,
}
// Check output dimensions.
- for (uint32_t i = 0; i < testModel.outputIndexes.size(); i++) {
- const auto& output = testModel.operands[testModel.outputIndexes[i]];
+ for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
+ const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]];
if (output.isIgnored) continue;
std::vector<uint32_t> actualDimensions;
ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(i, &actualDimensions));
@@ -432,9 +454,10 @@ void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model,
// Test driver for those generated from ml/nn/runtime/test/spec
void GeneratedTests::execute(const TestModel& testModel) {
NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
- Model model;
+ GeneratedModel model;
createModel(testModel, mTestDynamicOutputShape, &model);
- model.finish();
+ ASSERT_EQ(model.finish(), Result::NO_ERROR);
+ ASSERT_TRUE(model.isValid());
auto executeInternal = [&testModel, &model, this]() {
SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
#ifndef NNTEST_MULTITHREADED
@@ -530,22 +553,24 @@ INSTANTIATE_GENERATED_TEST(GeneratedValidationTests,
[](const TestModel& testModel) { return testModel.expectFailure; });
INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
- return testModel.operations.size() == 1 && testModel.hasQuant8CoupledOperands();
+ return testModel.main.operations.size() == 1 && testModel.referenced.size() == 0 &&
+ testModel.hasQuant8CoupledOperands();
});
INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) {
return !testModel.expectFailure &&
- std::all_of(testModel.outputIndexes.begin(), testModel.outputIndexes.end(),
+ std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
[&testModel](uint32_t index) {
- return testModel.operands[index].data.size() > 0;
+ return testModel.main.operands[index].data.size() > 0;
});
});
INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
return !testModel.expectFailure &&
- std::all_of(testModel.outputIndexes.begin(), testModel.outputIndexes.end(),
+ std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
[&testModel](uint32_t index) {
- return testModel.operands[index].data.size() > 0;
+ return testModel.main.operands[index].data.size() > 0;
});
});
+
} // namespace android::nn::generated_tests
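
A note on the GeneratedModel type used in createModel() and execute() above: a model referenced via setOperandValueFromModel() must outlive the model that refers to it, which is why createModel() hands the vector of referenced models to setRefModels(). A minimal sketch of such a wrapper, assuming it is declared elsewhere in the test sources (the class name and setRefModels() are taken from this diff; the member layout is an assumption):

// Sketch only: a Model that owns the models its SUBGRAPH operands refer to,
// so that they stay alive for the lifetime of the main model.
class GeneratedModel : public Model {
   public:
    void setRefModels(std::vector<Model> refModels) { mRefModels = std::move(refModels); }

   private:
    std::vector<Model> mRefModels;
};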