summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMiao Wang <miaowang@google.com>2018-03-06 15:03:14 -0800
committerMiao Wang <miaowang@google.com>2018-04-04 17:37:02 -0700
commit68b32d482cfb320be614b3aa68f57cc9f2e78a5b (patch)
tree67f5b19f865997fe7e66a5dc06e058210c248abd
parentd585bc2ad47317b9e8fd0937c7fb9fa608fd19d8 (diff)
downloadml-68b32d482cfb320be614b3aa68f57cc9f2e78a5b.tar.gz
Add basic validation for ANeuralNetworksModel_addOperation
- This change tries to catch errors during ANeuralNetworksModel_addOperation. - It checks the expected number of arguments and their types. - Add tests for most of the operations. - Fix a bug in another test caught by this change. Bug: 67828197 Test: mm Test: NeuralNetworksTest Merged-In: I565a8a08f96dd9f2b15a8e413360df0bedc57f37 Change-Id: I565a8a08f96dd9f2b15a8e413360df0bedc57f37 (cherry picked from commit 137d278a0b2e3f2b518ab30b24755838d807759a)
-rw-r--r--nn/common/Utils.cpp1012
-rw-r--r--nn/common/include/Utils.h4
-rw-r--r--nn/runtime/ModelBuilder.cpp9
-rw-r--r--nn/runtime/test/Android.bp1
-rw-r--r--nn/runtime/test/TestValidateOperations.cpp774
-rw-r--r--nn/runtime/test/TestValidation.cpp11
6 files changed, 1801 insertions, 10 deletions
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 6a79e5e28..64fbbd3ea 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -332,6 +332,1018 @@ int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCo
return ANEURALNETWORKS_NO_ERROR;
}
+int validateOperationOperandTypes(const std::vector<Operand>& operands,
+ uint32_t inOperandCount, const uint32_t* inOperandIndexes,
+ const std::vector<OperandType>& inExpectedTypes,
+ uint32_t outOperandCount, const uint32_t* outOperandIndexes,
+ const std::vector<OperandType>& outExpectedInTypes) {
+ if (inOperandCount > static_cast<uint32_t>(inExpectedTypes.size()) ||
+ outOperandCount > static_cast<uint32_t>(outExpectedInTypes.size())) {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ for (uint32_t i = 0; i < inOperandCount; i++) {
+ if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
+ LOG(ERROR) << "Invalid input tensor type "
+ << toString(operands[inOperandIndexes[i]].type)
+ << " for input " << i << ", expected " << toString(inExpectedTypes[i]);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+ for (uint32_t i = 0; i < outOperandCount; i++) {
+ if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
+ LOG(ERROR) << "Invalid output tensor type "
+ << toString(operands[outOperandIndexes[i]].type)
+ << " for input " << i << ", expected " << toString(outExpectedInTypes[i]);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int validateOperation(ANeuralNetworksOperationType opType,
+ uint32_t inputCount, const uint32_t* inputIndexes,
+ uint32_t outputCount, const uint32_t* outputIndexes,
+ const std::vector<Operand>& operands) {
+ int n = validateOperandList(inputCount, inputIndexes, static_cast<uint32_t>(operands.size()),
+ "ANeuralNetworksModel_addOperation inputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+ n = validateOperandList(outputCount, outputIndexes, static_cast<uint32_t>(operands.size()),
+ "ANeuralNetworksModel_addOperation outputs");
+ if (n != ANEURALNETWORKS_NO_ERROR) {
+ return n;
+ }
+
+ auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected " << expIn << ") or output operands ("
+ << outputCount << ", expected " << expOut << ") for operation "
+ << kOperationNames[opType];
+ };
+
+ switch (opType) {
+ case ANEURALNETWORKS_OEM_OPERATION: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_ADD: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_MUL: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_FLOOR: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_DEQUANTIZE: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_DEPTHWISE_CONV_2D: {
+ if ((inputCount != 11 && inputCount != 8) || outputCount != 1) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected 11 or 8) or output operands ("
+ << outputCount << ", expected 1) for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ if (inputCount == 11) {
+ std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
+ inExpectedTypes.insert(inExpectedTypes.end(),
+ explicitScalarTypes.begin(),
+ explicitScalarTypes.end());
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_CONV_2D: {
+ if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected 10 or 7) or output operands ("
+ << outputCount << ", expected 1) for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ if (inputCount == 10) {
+ std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
+ inExpectedTypes.insert(inExpectedTypes.end(),
+ explicitScalarTypes.begin(),
+ explicitScalarTypes.end());
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_AVERAGE_POOL_2D: {
+ if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected 10 or 7) or output operands ("
+ << outputCount << ", expected 1) for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ if (inputCount == 10) {
+ std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
+ inExpectedTypes.insert(inExpectedTypes.end(),
+ explicitScalarTypes.begin(),
+ explicitScalarTypes.end());
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_L2_POOL_2D: {
+ if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected 10 or 7) or output operands ("
+ << outputCount << ", expected 1) for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ if (inputCount == 10) {
+ std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
+ inExpectedTypes.insert(inExpectedTypes.end(),
+ explicitScalarTypes.begin(),
+ explicitScalarTypes.end());
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_MAX_POOL_2D: {
+ if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected 10 or 7) or output operands ("
+ << outputCount << ", expected 1) for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ if (inputCount == 10) {
+ std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
+ inExpectedTypes.insert(inExpectedTypes.end(),
+ explicitScalarTypes.begin(),
+ explicitScalarTypes.end());
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_RELU: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_RELU1: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_RELU6: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_TANH: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_LOGISTIC: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_SOFTMAX: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_FULLY_CONNECTED: {
+ if (inputCount != 4 || outputCount != 1) {
+ logInvalidInOutNumber(4, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_CONCATENATION: {
+ if (inputCount < 2 || outputCount != 1) {
+ LOG(ERROR) << "Invalid number of input operands ("
+ << inputCount << ", expected at least 2) or output operands ("
+ << outputCount << ", expected 1) for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes(inputCount, inputType);
+ std::vector<OperandType> outExpectedTypes = {inputType};
+ // The last one is the activation function.
+ inExpectedTypes.back() = OperandType::INT32;
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_L2_NORMALIZATION: {
+ if (inputCount != 1 || outputCount != 1) {
+ logInvalidInOutNumber(1, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION: {
+ if (inputCount != 5 || outputCount != 1) {
+ logInvalidInOutNumber(5, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::FLOAT32,
+ OperandType::FLOAT32,
+ OperandType::FLOAT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_RESHAPE: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_RESIZE_BILINEAR: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_DEPTH_TO_SPACE: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_SPACE_TO_DEPTH: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ // TODO: add validations for the following ops.
+ case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_LSH_PROJECTION: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_LSTM: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_RNN: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_SVDF: {
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_PAD: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_SQUEEZE: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_TRANSPOSE: {
+ if (inputCount != 2 || outputCount != 1) {
+ logInvalidInOutNumber(2, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_STRIDED_SLICE: {
+ if (inputCount != 6 || outputCount != 1) {
+ logInvalidInOutNumber(6, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_DIV: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_SUB: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_FLOAT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ case ANEURALNETWORKS_MEAN: {
+ if (inputCount != 3 || outputCount != 1) {
+ logInvalidInOutNumber(3, 1);
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ auto inputType = operands[inputIndexes[0]].type;
+ std::vector<OperandType> inExpectedTypes;
+ std::vector<OperandType> outExpectedTypes;
+ if (inputType == OperandType::TENSOR_FLOAT32) {
+ inExpectedTypes = {OperandType::TENSOR_FLOAT32,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+ } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+ inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
+ OperandType::TENSOR_INT32,
+ OperandType::INT32};
+ outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+ } else {
+ LOG(ERROR) << "Unsupported input tensor type for operation "
+ << kOperationNames[opType];
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return validateOperationOperandTypes(operands,
+ inputCount, inputIndexes,
+ inExpectedTypes,
+ outputCount, outputIndexes,
+ outExpectedTypes);
+ }
+ default:
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+}
+
// Versioning
bool compliantWithV1_0(V1_0::OperationType) {
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
index 810a1df68..0dacac5cf 100644
--- a/nn/common/include/Utils.h
+++ b/nn/common/include/Utils.h
@@ -133,6 +133,10 @@ inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code)
int validateOperandType(const ANeuralNetworksOperandType& type, const char* tag, bool allowPartial);
int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
const char* tag);
+int validateOperation(ANeuralNetworksOperationType opType,
+ uint32_t inputCount, const uint32_t* inputIndexes,
+ uint32_t outputCount, const uint32_t* outputIndexes,
+ const std::vector<Operand>& operands);
inline size_t getSizeFromInts(int lower, int higher) {
return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
diff --git a/nn/runtime/ModelBuilder.cpp b/nn/runtime/ModelBuilder.cpp
index 22e27c6aa..d136f5b48 100644
--- a/nn/runtime/ModelBuilder.cpp
+++ b/nn/runtime/ModelBuilder.cpp
@@ -208,13 +208,8 @@ int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t input
LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operations type " << type;
return ANEURALNETWORKS_BAD_DATA;
}
- int n = validateOperandList(inputCount, inputs, operandCount(),
- "ANeuralNetworksModel_addOperation inputs");
- if (n != ANEURALNETWORKS_NO_ERROR) {
- return n;
- }
- n = validateOperandList(outputCount, outputs, operandCount(),
- "ANeuralNetworksModel_addOperation outputs");
+ int n = validateOperation(type, inputCount, inputs,
+ outputCount, outputs, mOperands);
if (n != ANEURALNETWORKS_NO_ERROR) {
return n;
}
diff --git a/nn/runtime/test/Android.bp b/nn/runtime/test/Android.bp
index ffbea39f2..cfd3c6d4a 100644
--- a/nn/runtime/test/Android.bp
+++ b/nn/runtime/test/Android.bp
@@ -28,6 +28,7 @@ cc_defaults {
"TestPartitioning.cpp",
"TestPartitioningRandom.cpp",
"TestTrivialModel.cpp",
+ "TestValidateOperations.cpp",
"TestValidation.cpp",
"TestWrapper.cpp",
],
diff --git a/nn/runtime/test/TestValidateOperations.cpp b/nn/runtime/test/TestValidateOperations.cpp
new file mode 100644
index 000000000..8dc3013af
--- /dev/null
+++ b/nn/runtime/test/TestValidateOperations.cpp
@@ -0,0 +1,774 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NeuralNetworksWrapper.h"
+#include "NeuralNetworksOEM.h"
+
+#include <gtest/gtest.h>
+
+using namespace android::nn::wrapper;
+
+namespace {
+
+static const int32_t kAvailableOperandCodes[] = {
+ ANEURALNETWORKS_FLOAT32,
+ ANEURALNETWORKS_INT32,
+ ANEURALNETWORKS_UINT32,
+ ANEURALNETWORKS_TENSOR_FLOAT32,
+ ANEURALNETWORKS_TENSOR_INT32,
+ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+ ANEURALNETWORKS_TENSOR_OEM_BYTE
+};
+
+class OperationTestBase {
+public:
+ OperationTestBase(ANeuralNetworksOperationType opCode,
+ std::vector<ANeuralNetworksOperandType> validInputs,
+ std::vector<ANeuralNetworksOperandType> validOutputs)
+ : mOpCode(opCode),
+ mValidInputs(std::move(validInputs)),
+ mValidOutputs(std::move(validOutputs)) {}
+
+ // Add each operand separately and add the operation using these operands.
+ // This function does not cover the cases where an operand is used multiple times.
+ int32_t addOperation(const std::vector<ANeuralNetworksOperandType>& inputs,
+ const std::vector<ANeuralNetworksOperandType>& outputs) {
+ ANeuralNetworksModel* model = nullptr;
+ ANeuralNetworksModel_create(&model);
+
+ uint32_t opIdx = 0;
+ std::vector<uint32_t> inputIds;
+ std::vector<uint32_t> outputIds;
+ for (uint32_t i = 0; i < inputs.size(); i++) {
+ ANeuralNetworksModel_addOperand(model, &inputs[i]);
+ inputIds.push_back(opIdx++);
+ }
+ for (uint32_t i = 0; i < outputs.size(); i++) {
+ ANeuralNetworksModel_addOperand(model, &outputs[i]);
+ outputIds.push_back(opIdx++);
+ }
+
+ int32_t result = ANeuralNetworksModel_addOperation(model, mOpCode,
+ static_cast<uint32_t>(inputIds.size()),
+ inputIds.data(),
+ static_cast<uint32_t>(outputIds.size()),
+ outputIds.data());
+ ANeuralNetworksModel_free(model);
+ return result;
+ }
+
+ bool testMutatingInputOperandCode() {
+ for (uint32_t i = 0; i < mValidInputs.size(); i++) {
+ ANeuralNetworksOperandType newType = mValidInputs[i];
+ int32_t originalOperandCode = mValidInputs[i].type;
+ for (int32_t newOperandCode : kAvailableOperandCodes) {
+ if (newOperandCode == originalOperandCode) {
+ continue;
+ }
+ newType.type = newOperandCode;
+ std::vector<ANeuralNetworksOperandType> inputs = mValidInputs;
+ inputs[i] = newType;
+ int32_t result = addOperation(inputs, mValidOutputs);
+ if (ANEURALNETWORKS_NO_ERROR == result) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool testMutatingOutputOperandCode() {
+ for (uint32_t i = 0; i < mValidOutputs.size(); i++) {
+ ANeuralNetworksOperandType newType = mValidOutputs[i];
+ int32_t originalOperandCode = mValidOutputs[i].type;
+ for (int32_t newOperandCode : kAvailableOperandCodes) {
+ if (newOperandCode == originalOperandCode) {
+ continue;
+ }
+ newType.type = newOperandCode;
+ std::vector<ANeuralNetworksOperandType> outputs = mValidOutputs;
+ outputs[i] = newType;
+ int32_t result = addOperation(mValidInputs, outputs);
+ if (ANEURALNETWORKS_NO_ERROR == result) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool testMutatingInputOperandCounts() {
+ std::vector<ANeuralNetworksOperandType> inputs = mValidInputs;
+ for (uint32_t i = 0; i < 5; i++) {
+ inputs.push_back(inputs[0]);
+ if (ANEURALNETWORKS_NO_ERROR == addOperation(inputs, mValidOutputs)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool testMutatingOutputOperandCounts() {
+ std::vector<ANeuralNetworksOperandType> outputs = mValidOutputs;
+ for (int i = 0; i < 5; i++) {
+ outputs.push_back(outputs[0]);
+ if (ANEURALNETWORKS_NO_ERROR == addOperation(mValidInputs, outputs)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+private:
+ ANeuralNetworksOperationType mOpCode;
+ // The dimensions in the ANeuralNetworksOperandType must outlive the test object.
+ std::vector<ANeuralNetworksOperandType> mValidInputs;
+ std::vector<ANeuralNetworksOperandType> mValidOutputs;
+};
+
+TEST(OperationValidationTest, DEQUANTIZE_float32) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 1.0f,
+ .zeroPoint = 0};
+ ANeuralNetworksOperandType output = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ OperationTestBase dequantizeTest(ANEURALNETWORKS_DEQUANTIZE, {input}, {output});
+
+ EXPECT_TRUE(dequantizeTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(dequantizeTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(dequantizeTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(dequantizeTest.testMutatingOutputOperandCounts());
+}
+
+void simpleMathOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input1 = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input1.scale = 0.5f;
+ }
+
+ ANeuralNetworksOperandType input2 = input1;
+ ANeuralNetworksOperandType output = input1;
+ ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ OperationTestBase simpleMathTest(operationCode, {input1, input2, activation}, {output});
+
+ EXPECT_TRUE(simpleMathTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(simpleMathTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(simpleMathTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(simpleMathTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, ADD_float32) {
+ simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, MUL_float32) {
+ simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, SUB_float32) {
+ simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, DIV_float32) {
+ simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, ADD_quant8) {
+ simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, MUL_quant8) {
+ simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void activationOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 1.f / 256;
+ }
+
+ ANeuralNetworksOperandType output = input;
+ OperationTestBase activationTest(operationCode, {input}, {output});
+
+ EXPECT_TRUE(activationTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(activationTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(activationTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(activationTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, L2_NORMALIZATION_float32) {
+ activationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, FLOOR_float32) {
+ activationOpTest(ANEURALNETWORKS_FLOOR, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, TANH_float32) {
+ activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, RELU_float32) {
+ activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, RELU1_float32) {
+ activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, RELU6_float32) {
+ activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, LOGISTIC_float32) {
+ activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, RELU_quant8) {
+ activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, RELU1_quant8) {
+ activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, RELU6_quant8) {
+ activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, LOGISTIC_quant8) {
+ activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void softmaxOpTest(int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 1.f / 256;
+ }
+
+ ANeuralNetworksOperandType output = input;
+ ANeuralNetworksOperandType beta = {.type = ANEURALNETWORKS_FLOAT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ OperationTestBase softmaxTest(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output});
+
+ EXPECT_TRUE(softmaxTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(softmaxTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(softmaxTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(softmaxTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, SOFTMAX_float32) {
+ softmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, SOFTMAX_quant8) {
+ softmaxOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void poolingOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 4, 4, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 1.f / 256;
+ }
+ ANeuralNetworksOperandType output = input;
+
+ ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ ANeuralNetworksOperandType padLeft = scalar;
+ ANeuralNetworksOperandType padRight = scalar;
+ ANeuralNetworksOperandType padTop = scalar;
+ ANeuralNetworksOperandType padBottom = scalar;
+ ANeuralNetworksOperandType strideWidth = scalar;
+ ANeuralNetworksOperandType strideHeight = scalar;
+ ANeuralNetworksOperandType filterWidth = scalar;
+ ANeuralNetworksOperandType filterHeight = scalar;
+ ANeuralNetworksOperandType activation = scalar;
+
+ OperationTestBase explicitPoolingTest(operationCode,
+ {input,
+ padLeft, padRight, padTop, padBottom,
+ strideWidth, strideHeight,
+ filterWidth, filterHeight,
+ activation},
+ {output});
+
+ EXPECT_TRUE(explicitPoolingTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(explicitPoolingTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(explicitPoolingTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(explicitPoolingTest.testMutatingOutputOperandCounts());
+
+ ANeuralNetworksOperandType padImplicit = scalar;
+ OperationTestBase implicitPoolingTest(operationCode,
+ {input,
+ padImplicit,
+ strideWidth, strideHeight,
+ filterWidth, filterHeight,
+ activation},
+ {output});
+
+ EXPECT_TRUE(implicitPoolingTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(implicitPoolingTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(implicitPoolingTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(implicitPoolingTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, AVERAGE_POOL_2D_float32) {
+ poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, MAX_POOL_2D_float32) {
+ poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, L2_POOL_2D_float32) {
+ poolingOpTest(ANEURALNETWORKS_L2_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, AVERAGE_POOL_2D_quant8) {
+ poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, MAX_POOL_2D_quant8) {
+ poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void spaceDepthOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 1.f / 256;
+ }
+
+ ANeuralNetworksOperandType block_size = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ ANeuralNetworksOperandType output = input;
+ OperationTestBase spaceDepthTest(operationCode, {input, block_size}, {output});
+
+ EXPECT_TRUE(spaceDepthTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(spaceDepthTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(spaceDepthTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(spaceDepthTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, SPACE_TO_DEPTH_float32) {
+ spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, DEPTH_TO_SPACE_float32) {
+ spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, SPACE_TO_DEPTH_quant8) {
+ spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, DEPTH_TO_SPACE_quant8) {
+ spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void spaceBatchOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 1.f / 256;
+ }
+
+ uint32_t blockDimensions[1] = {2};
+ ANeuralNetworksOperandType blockShape = {.type = ANEURALNETWORKS_TENSOR_INT32,
+ .dimensionCount = 1,
+ .dimensions = blockDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ ANeuralNetworksOperandType cropOrPadding = blockShape;
+ ANeuralNetworksOperandType output = input;
+ OperationTestBase spaceBatchTest(operationCode, {input, blockShape, cropOrPadding}, {output});
+
+ EXPECT_TRUE(spaceBatchTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(spaceBatchTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(spaceBatchTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(spaceBatchTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, SPACE_TO_BATCH_ND_float32) {
+ spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, BATCH_TO_SPACE_ND_float32) {
+ spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, SPACE_TO_BATCH_ND_quant8) {
+ spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, BATCH_TO_SPACE_ND_quant8) {
+ spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void transposeAndSqueezeOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 1.f / 256;
+ }
+
+ uint32_t blockDimensions[1] = {4};
+ ANeuralNetworksOperandType dims = {.type = ANEURALNETWORKS_TENSOR_INT32,
+ .dimensionCount = 1,
+ .dimensions = blockDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ ANeuralNetworksOperandType output = input;
+ OperationTestBase transposeAndSqueezeTest(operationCode, {input, dims}, {output});
+
+ EXPECT_TRUE(transposeAndSqueezeTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(transposeAndSqueezeTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(transposeAndSqueezeTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(transposeAndSqueezeTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, TRANSPOSE_float32) {
+ transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, SQUEEZE_float32) {
+ transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, TRANSPOSE_quant8) {
+ transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, SQUEEZE_quant8) {
+ transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void convOpTest(int32_t operandCode) {
+ uint32_t inputDimensions[4] = {2, 4, 4, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 0.5f;
+ }
+
+ ANeuralNetworksOperandType filter = input;
+ ANeuralNetworksOperandType output = input;
+
+ uint32_t biasDimensions[1] = {2};
+ ANeuralNetworksOperandType bias = {.type = operandCode,
+ .dimensionCount = 1,
+ .dimensions = biasDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ bias.type = ANEURALNETWORKS_TENSOR_INT32;
+ bias.scale = 0.25f;
+ }
+
+ ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ ANeuralNetworksOperandType padLeft = scalar;
+ ANeuralNetworksOperandType padRight = scalar;
+ ANeuralNetworksOperandType padTop = scalar;
+ ANeuralNetworksOperandType padBottom = scalar;
+ ANeuralNetworksOperandType strideWidth = scalar;
+ ANeuralNetworksOperandType strideHeight = scalar;
+ ANeuralNetworksOperandType activation = scalar;
+
+ OperationTestBase explicitConvTest(ANEURALNETWORKS_CONV_2D,
+ {input, filter, bias,
+ padLeft, padRight, padTop, padBottom,
+ strideWidth, strideHeight,
+ activation},
+ {output});
+
+ EXPECT_TRUE(explicitConvTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(explicitConvTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(explicitConvTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(explicitConvTest.testMutatingOutputOperandCounts());
+
+ ANeuralNetworksOperandType padImplicit = scalar;
+ OperationTestBase implicitConvTest(ANEURALNETWORKS_CONV_2D,
+ {input, filter, bias,
+ padImplicit,
+ strideWidth, strideHeight,
+ activation},
+ {output});
+
+ EXPECT_TRUE(implicitConvTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(implicitConvTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(implicitConvTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(implicitConvTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, CONV_2D_float32) {
+ convOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, CONV_2D_quant8) {
+ convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void depthwiseConvOpTest(int32_t operandCode) {
+ uint32_t inputDimensions[4] = {1, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 0.5f;
+ }
+
+ ANeuralNetworksOperandType filter = input;
+ ANeuralNetworksOperandType output = input;
+
+ uint32_t biasDimensions[1] = {2};
+ ANeuralNetworksOperandType bias = {.type = operandCode,
+ .dimensionCount = 1,
+ .dimensions = biasDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ bias.type = ANEURALNETWORKS_TENSOR_INT32;
+ bias.scale = 0.25f;
+ }
+
+ ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ ANeuralNetworksOperandType padLeft = scalar;
+ ANeuralNetworksOperandType padRight = scalar;
+ ANeuralNetworksOperandType padTop = scalar;
+ ANeuralNetworksOperandType padBottom = scalar;
+ ANeuralNetworksOperandType strideWidth = scalar;
+ ANeuralNetworksOperandType strideHeight = scalar;
+ ANeuralNetworksOperandType multiplier = scalar;
+ ANeuralNetworksOperandType activation = scalar;
+
+ OperationTestBase explicitDepthwiseConvTest(ANEURALNETWORKS_DEPTHWISE_CONV_2D,
+ {input, filter, bias,
+ padLeft, padRight, padTop, padBottom,
+ strideWidth, strideHeight,
+ multiplier, activation},
+ {output});
+
+ EXPECT_TRUE(explicitDepthwiseConvTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(explicitDepthwiseConvTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(explicitDepthwiseConvTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(explicitDepthwiseConvTest.testMutatingOutputOperandCounts());
+
+ ANeuralNetworksOperandType padImplicit = scalar;
+ OperationTestBase implicitDepthwiseConvTest(ANEURALNETWORKS_DEPTHWISE_CONV_2D,
+ {input, filter, bias,
+ padImplicit,
+ strideWidth, strideHeight,
+ multiplier, activation},
+ {output});
+
+ EXPECT_TRUE(implicitDepthwiseConvTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(implicitDepthwiseConvTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(implicitDepthwiseConvTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(implicitDepthwiseConvTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, DEPTHWISE_CONV_2D_float32) {
+ depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8) {
+ depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void fullyConnectedOpTest(int32_t operandCode) {
+ uint32_t inputDimensions[2] = {5, 5};
+ ANeuralNetworksOperandType input = {.type = operandCode,
+ .dimensionCount = 2,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input.scale = 0.5f;
+ }
+
+ ANeuralNetworksOperandType weights = input;
+ ANeuralNetworksOperandType output = input;
+
+ uint32_t biasDimensions[1] = {5};
+ ANeuralNetworksOperandType bias = {.type = operandCode,
+ .dimensionCount = 1,
+ .dimensions = biasDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ bias.type = ANEURALNETWORKS_TENSOR_INT32;
+ bias.scale = 0.25f;
+ }
+
+ ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ OperationTestBase fullyConnectedTest(ANEURALNETWORKS_FULLY_CONNECTED,
+ {input, weights, bias, activation},
+ {output});
+
+ EXPECT_TRUE(fullyConnectedTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(fullyConnectedTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(fullyConnectedTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(fullyConnectedTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, FULLY_CONNECTED_float32) {
+ fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, FULLY_CONNECTED_quant8) {
+ fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+void concatenationTest(int32_t operandCode) {
+ uint32_t inputDimensions[2] = {5, 5};
+ ANeuralNetworksOperandType input1 = {.type = operandCode,
+ .dimensionCount = 2,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+ input1.scale = 0.5f;
+ }
+ ANeuralNetworksOperandType input2 = input1;
+ ANeuralNetworksOperandType output = input1;
+
+ ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+
+ OperationTestBase concat2Test(ANEURALNETWORKS_CONCATENATION,
+ {input1, input2, activation}, {output});
+
+ EXPECT_TRUE(concat2Test.testMutatingInputOperandCode());
+ EXPECT_TRUE(concat2Test.testMutatingOutputOperandCode());
+ EXPECT_TRUE(concat2Test.testMutatingOutputOperandCounts());
+
+ OperationTestBase concat1Test(ANEURALNETWORKS_CONCATENATION,
+ {input1, activation}, {output});
+
+ EXPECT_TRUE(concat1Test.testMutatingInputOperandCode());
+ EXPECT_TRUE(concat1Test.testMutatingOutputOperandCode());
+ EXPECT_TRUE(concat1Test.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, CONCATENATION_float32) {
+ concatenationTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+
+TEST(OperationValidationTest, CONCATENATION_quant8) {
+ concatenationTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+
+TEST(OperationValidationTest, RESIZE_BILINEAR_float32) {
+ uint32_t inputDimensions[4] = {2, 2, 2, 2};
+ ANeuralNetworksOperandType input = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
+ .dimensionCount = 4,
+ .dimensions = inputDimensions,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ ANeuralNetworksOperandType height = {.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr,
+ .scale = 0.0f,
+ .zeroPoint = 0};
+ ANeuralNetworksOperandType width = height;
+ ANeuralNetworksOperandType output = input;
+ OperationTestBase resizeTest(ANEURALNETWORKS_RESIZE_BILINEAR,
+ {input, height, width}, {output});
+
+ EXPECT_TRUE(resizeTest.testMutatingInputOperandCode());
+ EXPECT_TRUE(resizeTest.testMutatingInputOperandCounts());
+ EXPECT_TRUE(resizeTest.testMutatingOutputOperandCode());
+ EXPECT_TRUE(resizeTest.testMutatingOutputOperandCounts());
+}
+
+} // end namespace
diff --git a/nn/runtime/test/TestValidation.cpp b/nn/runtime/test/TestValidation.cpp
index f75d0a0b9..5c03bde19 100644
--- a/nn/runtime/test/TestValidation.cpp
+++ b/nn/runtime/test/TestValidation.cpp
@@ -75,12 +75,17 @@ protected:
ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32,
.dimensionCount = 1,
.dimensions = dimensions};
+ ANeuralNetworksOperandType scalarType{.type = ANEURALNETWORKS_INT32,
+ .dimensionCount = 0,
+ .dimensions = nullptr};
+
ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
+ ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &scalarType), ANEURALNETWORKS_NO_ERROR);
ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
- uint32_t inList[2]{0, 1};
- uint32_t outList[1]{2};
- ASSERT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_ADD, 2, inList, 1,
+ uint32_t inList[3]{0, 1, 2};
+ uint32_t outList[1]{3};
+ ASSERT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_ADD, 3, inList, 1,
outList),
ANEURALNETWORKS_NO_ERROR);
ASSERT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(mModel, 2, inList, 1, outList),