author     android-build-team Robot <android-build-team-robot@google.com>  2020-05-28 01:14:04 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2020-05-28 01:14:04 +0000
commit     684fc6a00d18db72e75d831f5388290655e2557c (patch)
tree       c2f4a65aca1ca847357b83e93b8f7d3ba440f153
parent     1b8eefa075e0e24e91a003c63f6b00c91b62d396 (diff)
parent     2299d26439f28d18aa9d4ef4c6605579ef9bc49a (diff)
download   ml-684fc6a00d18db72e75d831f5388290655e2557c.tar.gz
Snap for 6534196 from 2299d26439f28d18aa9d4ef4c6605579ef9bc49a to rvc-release
Change-Id: Id34f07d30e78e2a22c1ccb78a1ba7bb7a6998643
-rw-r--r--  nn/common/Utils.cpp                                                               22
-rw-r--r--  nn/runtime/test/fuzzing/TestRandomGraph.cpp                                        2
-rw-r--r--  nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp                      68
-rw-r--r--  nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp  53
-rw-r--r--  nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py                25
5 files changed, 138 insertions(+), 32 deletions(-)
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index fedc8cb30..4e4d0e06a 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -26,7 +26,10 @@
#include <sys/system_properties.h>
#include <algorithm>
+#include <functional>
+#include <iostream>
#include <limits>
+#include <numeric>
#include <set>
#include <string>
#include <tuple>
@@ -1509,8 +1512,10 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
logInvalidInOutNumber(1, 1);
return ANEURALNETWORKS_BAD_DATA;
}
- auto inputType = operands[inputIndexes[0]].type;
- auto outputType = operands[outputIndexes[0]].type;
+ auto inputOperand = operands[inputIndexes[0]];
+ auto outputOperand = operands[outputIndexes[0]];
+ auto inputType = inputOperand.type;
+ auto outputType = outputOperand.type;
std::vector<OperandType> inExpectedTypes;
std::vector<OperandType> outExpectedTypes;
if ((inputType == OperandType::TENSOR_FLOAT16 ||
@@ -1536,6 +1541,19 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
LOG(ERROR) << "Unsupported data type for operation " << getOperationName(opType);
return ANEURALNETWORKS_BAD_DATA;
}
+ // Validate that output shape is equal to input shape if dimensions
+ // are already known.
+ auto getNumberOfElements = [](const hardware::hidl_vec<uint32_t>& dims) {
+ if (dims.size() == 0) {
+ return 0;
+ }
+ return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>());
+ };
+ if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 &&
+ getNumberOfElements(outputOperand.dimensions) != 0 &&
+ inputOperand.dimensions != outputOperand.dimensions) {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
return validateOperationOperandTypes(operands, inputCount, inputIndexes,
inExpectedTypes, outputCount, outputIndexes,
outExpectedTypes);
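
Reviewer note: the hunk above makes CAST validation reject operations whose input and output shapes are both fully specified but differ. A minimal standalone sketch of that check, using std::vector in place of hardware::hidl_vec; the helper names are illustrative and not part of the patch:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Product of all dimensions; 0 for an unspecified (empty) shape.
    static uint32_t numElements(const std::vector<uint32_t>& dims) {
        if (dims.empty()) return 0u;
        return std::accumulate(dims.begin(), dims.end(), 1u, std::multiplies<>());
    }

    // Mirrors the added validation: only reject when both shapes are known
    // and the output has a nonzero element count but different dimensions.
    static bool castShapesCompatible(const std::vector<uint32_t>& in,
                                     const std::vector<uint32_t>& out) {
        if (in.empty() || out.empty() || numElements(out) == 0) return true;
        return in == out;
    }

    // Example: {2, 3} -> {100} is rejected; {2, 3} -> {2, 3} is accepted.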
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 55c6542f1..2c8024a22 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -485,7 +485,7 @@ const AccuracyCriteria kStrictCriteria = {
// broadcast or elementwise, e.g ADD, FLOOR.
const AccuracyCriteria kMediumCriteria = {
.float32 = {.bias = 1e-6f, .mse = 1e-8f, .atol = 1e-5f, .rtol = 1e-5f},
- .float16 = {.bias = 1e-3f, .mse = 1e-6f, .atol = 1e-2f, .rtol = 1e-2f},
+ .float16 = {.bias = 1e-3f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
.quant8AsymmSigned = {.bias = 1.2, .mse = 1.2, .atol = 2},
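
Reviewer note: the float16 MSE bound in kMediumCriteria is relaxed from 1e-6 to 1e-5, which accommodates the extra rounding introduced by the FP16 scale handling added in Elementwise.cpp below. A hedged sketch of how a mean-squared-error criterion of this kind can be evaluated; the helper is illustrative only and not part of the test harness:

    #include <cstddef>
    #include <vector>

    // Compare actual vs. expected outputs against an MSE bound,
    // e.g. mseBound = 1e-5f for the relaxed float16 criterion.
    static bool withinMseCriterion(const std::vector<float>& expected,
                                   const std::vector<float>& actual, float mseBound) {
        if (expected.size() != actual.size() || expected.empty()) return false;
        double sum = 0.0;
        for (size_t i = 0; i < expected.size(); ++i) {
            const double diff = static_cast<double>(actual[i]) - expected[i];
            sum += diff * diff;
        }
        return (sum / expected.size()) <= mseBound;
    }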
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
index 567ff0581..d84727d95 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
@@ -131,41 +131,51 @@ DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(LOGISTIC, V1_3, /*scale=*/1.f / 2
DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(TANH, V1_3, /*scale=*/1.f / 128, /*zeroPoint=*/0,
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+static void castingOpConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
+ sameDimensionOpConstructor(dataType, rank, op);
+
+ // If it is casting to/from a FP16 data type, the source/destination should have a scale
+ // representable in FP16 to avoid precision loss.
+ if (op->inputs[0]->dataType == TestOperandType::TENSOR_FLOAT16) {
+ op->outputs[0]->scale = static_cast<_Float16>(op->outputs[0]->scale);
+ } else if (op->outputs[0]->dataType == TestOperandType::TENSOR_FLOAT16) {
+ op->inputs[0]->scale = static_cast<_Float16>(op->inputs[0]->scale);
+ }
+}
+
// Operations with output data type different from input.
-#define DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(op, ver, outType, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##outType##_##ver){ \
- .opType = TestOperationType::op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = TestHalVersion::ver, \
- .inputs = {INPUT_DEFAULT}, \
- .outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
- .constructor = sameDimensionOpConstructor};
-
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_0, /*outType=*/TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM);
+#define DEFINE_QUANTIZATION_OP_SIGNATURE(op, ver, outType, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##outType##_##ver){ \
+ .opType = TestOperationType::op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
+ .constructor = castingOpConstructor};
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_SYMM);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_0, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT16,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_QUANT8_SYMM);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_SYMM);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_QUANT8_SYMM);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT16,
- TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_2, /*outType=*/TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_FLOAT16);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_3,
- /*outType=*/TENSOR_QUANT8_ASYMM_SIGNED,
- TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_FLOAT16);
+DEFINE_QUANTIZATION_OP_SIGNATURE(QUANTIZE, V1_2, /*outType=*/TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16);
+
+DEFINE_QUANTIZATION_OP_SIGNATURE(QUANTIZE, V1_3,
+ /*outType=*/TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16);
#define DEFINE_CAST_SIGNATURE(ver, outType, ...) \
DEFINE_OPERATION_SIGNATURE(CAST_##outType##_##ver){ \
@@ -175,7 +185,7 @@ DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_3,
.version = TestHalVersion::ver, \
.inputs = {INPUT_DEFAULT}, \
.outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
- .constructor = sameDimensionOpConstructor};
+ .constructor = castingOpConstructor};
DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
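
Reviewer note: castingOpConstructor snaps the quantization scale to the nearest FP16-representable value whenever one side of the cast is TENSOR_FLOAT16, so the FP16 path and the reference path quantize with the same effective scale. A small standalone sketch of that rounding step, assuming a compiler with the _Float16 extension (e.g. clang); not taken from the patch:

    #include <cstdio>

    // Round a float to the nearest FP16-representable value, as the
    // constructor does via static_cast<_Float16>.
    static float roundToFp16(float scale) {
        return static_cast<float>(static_cast<_Float16>(scale));
    }

    int main() {
        const float scale = 0.1234567f;
        // The rounded scale is what both paths should use to avoid precision loss.
        std::printf("original: %.9f  fp16-rounded: %.9f\n", scale, roundToFp16(scale));
        return 0;
    }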
diff --git a/nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp b/nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp
new file mode 100644
index 000000000..24ba83438
--- /dev/null
+++ b/nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp
@@ -0,0 +1,53 @@
+// Generated from cast_mismatching_shapes.mod.py
+// DO NOT EDIT
+// clang-format off
+#include "TestHarness.h"
+using namespace test_helper;
+
+namespace generated_tests::cast_mismatching_shapes {
+
+const TestModel& get_test_model() {
+ static TestModel model = {
+ .expectFailure = true,
+ .expectedMultinomialDistributionTolerance = 0,
+ .isRelaxed = false,
+ .main = {
+ .inputIndexes = {0},
+ .operands = {{ // input0
+ .channelQuant = {},
+ .data = TestBuffer::createFromVector<int32_t>({1, 2, 3, 4, 5, 6}),
+ .dimensions = {2, 3},
+ .isIgnored = false,
+ .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .type = TestOperandType::TENSOR_INT32,
+ .zeroPoint = 0
+ }, { // output0
+ .channelQuant = {},
+ .data = TestBuffer::createFromVector<int32_t>({1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
+ .dimensions = {100},
+ .isIgnored = false,
+ .lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT,
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .type = TestOperandType::TENSOR_INT32,
+ .zeroPoint = 0
+ }},
+ .operations = {{
+ .inputs = {0},
+ .outputs = {1},
+ .type = TestOperationType::CAST
+ }},
+ .outputIndexes = {1}
+ },
+ .minSupportedVersion = TestHalVersion::UNKNOWN,
+ .referenced = {}
+ };
+ return model;
+}
+
+const auto dummy_test_model = TestModelManager::get().add("cast_mismatching_shapes", get_test_model());
+
+} // namespace generated_tests::cast_mismatching_shapes
+
diff --git a/nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py b/nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py
new file mode 100644
index 000000000..c718e5cc2
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_INT32", "{2, 3}")
+output0 = Output("output0", "TENSOR_INT32", "{100}")
+
+model = Model().Operation("CAST", input0).To(output0)
+
+example = Example({
+ input0: [1, 2, 3, 4, 5, 6],
+ output0: [1, 2, 3, 4, 5, 6] + [0] * 94,
+}, model=model).ExpectFailure()