diff options
author | Mika Raento <mikie@google.com> | 2018-04-17 14:09:53 +0100 |
---|---|---|
committer | Mika Raento <mikie@google.com> | 2018-04-25 12:02:54 +0100 |
commit | 951a1eeb114a6aacc87d6df67b9090bb94afe3bf (patch) | |
tree | 42d0ce4ac50a21711311e2daea8f19cea908f1d2 /nn/runtime/test/TestUnknownDimensions.cpp | |
parent | e770a8d43244527917f62c41e8fba5eeb270ca66 (diff) | |
download | ml-951a1eeb114a6aacc87d6df67b9090bb94afe3bf.tar.gz |
Add test for dimensions unknown at compile time
Adds a test for operands which have one or more unknown dimensions at
compile time but are fully known at execution time. This is meant to be
supported but had a bug earlier.
Bug: 72448000
Test: NeuralNetworksTest_static (emulator)
Change-Id: I7ad5e6b6c29f8fe45098c9291d6a2d4b00c58eb8
Diffstat (limited to 'nn/runtime/test/TestUnknownDimensions.cpp')
-rw-r--r-- | nn/runtime/test/TestUnknownDimensions.cpp | 234 |
1 files changed, 234 insertions, 0 deletions
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests NNAPI operands whose dimensions are unknown at compile (model build)
// time but fully specified at execution time (b/72448000).

#include "NeuralNetworksWrapper.h"
#include "TestHarness.h"

#include <gtest/gtest.h>

#include <tuple>
#include <vector>

using namespace android::nn::wrapper;
using namespace test_helper;

namespace {

// The dimension size every operand is "meant" to have at execution time.
const uint32_t INTENDED_SIZE = 3;
// A deliberately mismatching size, used to provoke execution-time errors.
const uint32_t OTHER_SIZE = 2;
// Zero is the NNAPI convention for "dimension unknown" in an operand type
// (see the UNKNOWN_AT_COMPILE_* kinds below).
const uint32_t UNKNOWN_SIZE = 0;
typedef float IntendedMatrix[INTENDED_SIZE][INTENDED_SIZE];

// We test three basic scenarios for each tensor dimension:
// INTENDED_AT_COMPILE_AND_EXECUTE: set the dimension at compile
// (addOperand) time to INTENDED_SIZE, use same size at execution
// (setInput/setOutput) time. This should always work.
//
// INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE: set the dimension at compile
// (addOperand) time to INTENDED_SIZE, give no size at execution time.
// This should always work.
//
// UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE: don't set the dimension at
// compile (addOperand) time, use INTENDED_SIZE at execute
// (setInput/setOutput) time. Note for constants, this just means using an
// unknown dimension at addOperand as there is no type parameter to
// setOperandValue. This should work for inputs and outputs and give an
// error for constants at compile time.
//
// UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE: don't set the dimension at compile
// (addOperand) time, use OTHER_SIZE at execute (setInput/setOutput) time.
// This should give an error at execute time (as the constant value will
// have a different size).
enum class DimensionKind { INTENDED_AT_COMPILE_AND_EXECUTE,
                           INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
                           UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
                           UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE };
// One DimensionKind per dimension of a rank-2 operand.
typedef std::tuple<DimensionKind, DimensionKind> OperandParams;
typedef std::tuple<OperandParams, // first input
                   OperandParams, // second input
                   OperandParams, // constant
                   OperandParams  // output
                  > TestParams;
// All relevant combinations of the basic scenarios are then created with TEST_P
auto ioDimensionValues = testing::Values(DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE,
                                         DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
                                         DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
                                         DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE);
// Constants get no type parameter at setOperandValue, so only the two kinds
// that are meaningful at compile time apply to them.
auto constantDimensionValues = testing::Values(
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE);
auto ioValues = testing::Combine(ioDimensionValues, ioDimensionValues);
auto constantValues = testing::Combine(constantDimensionValues, constantDimensionValues);


class UnknownDimensionsTest : public ::testing::TestWithParam<TestParams> {
protected:
    // Fixed 3x3 fixtures: ones + twos (inputs), plus the constant twos,
    // yields fives (the expected output of the two chained ADDs below).
    const IntendedMatrix ones = { { 1.f, 1.f, 1.f }, { 1.f, 1.f, 1.f }, { 1.f, 1.f, 1.f } };
    const IntendedMatrix twos = { { 2.f, 2.f, 2.f }, { 2.f, 2.f, 2.f }, { 2.f, 2.f, 2.f } };
    const IntendedMatrix fives = { { 5.f, 5.f, 5.f }, { 5.f, 5.f, 5.f }, { 5.f, 5.f, 5.f } };
};

// Builds the model (ones + twos) + twos = fives with the parameterized
// dimension kinds applied to the two inputs, the constant, and the output,
// then asserts success or failure at the phase each combination dictates:
// compile (model.finish) for bad constants, execute (compute) for sizes
// that disagree with the model, full numeric comparison otherwise.
TEST_P(UnknownDimensionsTest, UnknownDimensions) {
    TestParams params = GetParam();
    auto paramsForInput0 = std::get<0>(params),
         paramsForInput1 = std::get<1>(params),
         paramsForConst = std::get<2>(params),
         paramsForOutput = std::get<3>(params);

    Model model;
    std::string input0Scope("Input 0:"), input1Scope("Input 1:"),
        constantScope("Constant:"), outputScope("Output:");

    // Maps a DimensionKind to the dimension value passed to addOperand
    // (UNKNOWN_SIZE == 0 means "unknown"), and appends the kind's name to
    // *scope so gtest failures are human-readable.
    auto getDimForCompile = [](DimensionKind kind, std::string* scope) {
        switch (kind) {
            case DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_AND_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE");
                return UNKNOWN_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE");
                return UNKNOWN_SIZE;
        }
    };
    // Adds a rank-2 TENSOR_FLOAT32 operand whose two dimensions follow the
    // given OperandParams (either INTENDED_SIZE or unknown/0 at compile time).
    auto addOperand = [&model, &getDimForCompile](OperandParams params,
                                                  std::string* scope = nullptr) {
        OperandType matrixTypeWithPotentiallyUnknownDims(
                Type::TENSOR_FLOAT32,
                { getDimForCompile(std::get<0>(params), scope),
                  getDimForCompile(std::get<1>(params), scope) });
        return model.addOperand(&matrixTypeWithPotentiallyUnknownDims);
    };
    auto inputOpd0 = addOperand(paramsForInput0, &input0Scope);
    auto inputOpd1 = addOperand(paramsForInput1, &input1Scope);
    auto intermediateOpd0 = addOperand(OperandParams{
            // Dimensions for intermediate operand actually deduced at execution time
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE});
    auto constantOpd0 = addOperand(paramsForConst, &constantScope);
    auto outputOpd0 = addOperand(paramsForOutput, &outputScope);

    // Make the gtest failure easier to read, TEST_P just outputs a list of
    // numbers
    SCOPED_TRACE(input0Scope);
    SCOPED_TRACE(input1Scope);
    SCOPED_TRACE(constantScope);
    SCOPED_TRACE(outputScope);

    OperandType scalarType(Type::INT32, {});
    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
    auto activationOpd0 = model.addOperand(&scalarType);

    // Graph: intermediate = input0 + input1; output = intermediate + constant.
    model.setOperandValue(activationOpd0, &activation, sizeof(activation));
    model.setOperandValue(constantOpd0, twos, sizeof(twos));
    model.addOperation(ANEURALNETWORKS_ADD,
                       {inputOpd0, inputOpd1, activationOpd0},
                       {intermediateOpd0});
    model.addOperation(ANEURALNETWORKS_ADD,
                       {intermediateOpd0, constantOpd0, activationOpd0},
                       {outputOpd0});
    model.identifyInputsAndOutputs({inputOpd0, inputOpd1}, {outputOpd0});
    // A constant with any unknown dimension must be rejected at compile time,
    // since setOperandValue carries no type to resolve it later.
    if (std::get<0>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
        std::get<1>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);
    } else {
        ASSERT_FALSE(model.isValid());
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(model.finish(), Result::NO_ERROR);
        return;
    }

    Compilation compilation(&model);
    ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

    // Output buffer pre-filled with sentinel values; overwritten on success.
    IntendedMatrix actual = { { -1.f, -1.f, -1.f }, { -1.f, -1.f, -1.f }, { -1.f, -1.f, -1.f } };
    Execution execution(&compilation);

    OperandType matrixTypeIntended(Type::TENSOR_FLOAT32, {INTENDED_SIZE, INTENDED_SIZE});
    OperandType matrixTypeFirstOther(Type::TENSOR_FLOAT32, {OTHER_SIZE, INTENDED_SIZE});
    OperandType matrixTypeSecondOther(Type::TENSOR_FLOAT32, {INTENDED_SIZE, OTHER_SIZE});
    OperandType matrixTypeBothOther(Type::TENSOR_FLOAT32, {OTHER_SIZE, OTHER_SIZE});
    bool allAreIntendedSizeAtExecution = true;

    // Helper to return appropriate "type" parameter to setInput/setOutput based
    // on OperandParams. Side effect: clears allAreIntendedSizeAtExecution
    // whenever an OTHER_SIZE dimension is used, so the test later expects
    // compute() to fail.
    auto typeAtSet = [&](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE &&
            second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeBothOther.operandType;
        } else if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeFirstOther.operandType;
        } else if (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeSecondOther.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE) {
            return &matrixTypeIntended.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
            // nullptr type == "use the dimensions already in the model".
            return static_cast<ANeuralNetworksOperandType*>(nullptr);
        } else {
            return &matrixTypeIntended.operandType;
        }
    };
    // Helper to return appropriate "size" parameter to setInput/setOutput based
    // on OperandParams
    auto sizeAtSet = [](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        size_t firstDim = (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) ?
                OTHER_SIZE : INTENDED_SIZE;
        size_t secondDim = (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) ?
                OTHER_SIZE : INTENDED_SIZE;
        return firstDim * secondDim * sizeof(float);
    };
    ASSERT_EQ(execution.setInput(0, ones, sizeAtSet(paramsForInput0), typeAtSet(paramsForInput0)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, twos, sizeAtSet(paramsForInput1), typeAtSet(paramsForInput1)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeAtSet(paramsForOutput),
                                  typeAtSet(paramsForOutput)),
              Result::NO_ERROR);

    if (allAreIntendedSizeAtExecution) {
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    } else {
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(execution.compute(), Result::NO_ERROR);
        return;
    }

    // Element-wise comparison of the computed output against the expected
    // all-fives matrix, via the TestHarness MixedTyped helper.
    using fvec = std::vector<float>;
    constexpr size_t count = sizeof(fives) / sizeof(fives[0][0]);
    compare(
        MixedTyped{{{0, fvec{&fives[0][0], &fives[0][0] + count}}}, {}, {}},
        MixedTyped{{{0, fvec{&actual[0][0], &actual[0][0] + count}}}, {}, {}});
}

INSTANTIATE_TEST_CASE_P(UnknownCombinationsTest, UnknownDimensionsTest,
                        testing::Combine(ioValues, ioValues,
                                         constantValues, ioValues));

}  // end namespace