Diffstat (limited to 'nn/runtime')
-rw-r--r--  nn/runtime/test/fuzzing/TestRandomGraph.cpp                                        2
-rw-r--r--  nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp                      68
-rw-r--r--  nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp  53
-rw-r--r--  nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py                25
4 files changed, 118 insertions, 30 deletions
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 55c6542f1..2c8024a22 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -485,7 +485,7 @@ const AccuracyCriteria kStrictCriteria = {
// broadcast or elementwise, e.g ADD, FLOOR.
const AccuracyCriteria kMediumCriteria = {
.float32 = {.bias = 1e-6f, .mse = 1e-8f, .atol = 1e-5f, .rtol = 1e-5f},
- .float16 = {.bias = 1e-3f, .mse = 1e-6f, .atol = 1e-2f, .rtol = 1e-2f},
+ .float16 = {.bias = 1e-3f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
.quant8AsymmSigned = {.bias = 1.2, .mse = 1.2, .atol = 2},
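For context on the kMediumCriteria change above, here is a minimal sketch, not the test harness's actual checker, of how an MSE bound such as the float16 limit relaxed here (1e-6f to 1e-5f) could be evaluated against a device's outputs. The helper name meetsMseCriterion and the plain std::vector interface are assumptions for illustration only.

// Minimal sketch, assuming a plain std::vector interface; this is not the
// harness's real accuracy checker, and meetsMseCriterion is a hypothetical name.
#include <cstddef>
#include <vector>

bool meetsMseCriterion(const std::vector<float>& expected,
                       const std::vector<float>& actual, float mseLimit) {
    if (expected.empty() || expected.size() != actual.size()) return false;
    double sumSquaredError = 0.0;
    for (std::size_t i = 0; i < expected.size(); ++i) {
        const double diff = static_cast<double>(actual[i]) - expected[i];
        sumSquaredError += diff * diff;
    }
    // Mean squared error must stay within the per-type limit, e.g. 1e-5f for
    // float16 under the relaxed kMediumCriteria above.
    return sumSquaredError / static_cast<double>(expected.size()) <= mseLimit;
}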
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
index 567ff0581..d84727d95 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
@@ -131,41 +131,51 @@ DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(LOGISTIC, V1_3, /*scale=*/1.f / 2
DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(TANH, V1_3, /*scale=*/1.f / 128, /*zeroPoint=*/0,
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+static void castingOpConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
+ sameDimensionOpConstructor(dataType, rank, op);
+
+ // If it is casting to/from a FP16 data type, the source/destination should have a scale
+ // representable in FP16 to avoid precision loss.
+ if (op->inputs[0]->dataType == TestOperandType::TENSOR_FLOAT16) {
+ op->outputs[0]->scale = static_cast<_Float16>(op->outputs[0]->scale);
+ } else if (op->outputs[0]->dataType == TestOperandType::TENSOR_FLOAT16) {
+ op->inputs[0]->scale = static_cast<_Float16>(op->inputs[0]->scale);
+ }
+}
+
// Operations with output data type different from input.
-#define DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(op, ver, outType, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##outType##_##ver){ \
- .opType = TestOperationType::op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = TestHalVersion::ver, \
- .inputs = {INPUT_DEFAULT}, \
- .outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
- .constructor = sameDimensionOpConstructor};
-
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_0, /*outType=*/TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM);
+#define DEFINE_QUANTIZATION_OP_SIGNATURE(op, ver, outType, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##outType##_##ver){ \
+ .opType = TestOperationType::op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
+ .constructor = castingOpConstructor};
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_SYMM);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_0, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT16,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_QUANT8_SYMM);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_SYMM);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_QUANT8_SYMM);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT16,
- TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_2, /*outType=*/TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_FLOAT16);
+DEFINE_QUANTIZATION_OP_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
-DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_3,
- /*outType=*/TENSOR_QUANT8_ASYMM_SIGNED,
- TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_FLOAT16);
+DEFINE_QUANTIZATION_OP_SIGNATURE(QUANTIZE, V1_2, /*outType=*/TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16);
+
+DEFINE_QUANTIZATION_OP_SIGNATURE(QUANTIZE, V1_3,
+ /*outType=*/TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16);
#define DEFINE_CAST_SIGNATURE(ver, outType, ...) \
DEFINE_OPERATION_SIGNATURE(CAST_##outType##_##ver){ \
@@ -175,7 +185,7 @@ DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_3,
.version = TestHalVersion::ver, \
.inputs = {INPUT_DEFAULT}, \
.outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
- .constructor = sameDimensionOpConstructor};
+ .constructor = castingOpConstructor};
DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
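The key idea in castingOpConstructor above is that round-tripping a randomly generated scale through _Float16 snaps it to the nearest FP16-representable value. A standalone sketch of that rounding follows; the main() program is illustrative only and not part of the change, and _Float16 requires a compiler and target with FP16 support (e.g. clang on AArch64).

// Illustrative only: shows how the static_cast<_Float16> in castingOpConstructor
// rounds a float scale to the nearest value representable in FP16.
#include <cstdio>

int main() {
    float scale = 0.1234567f;                        // e.g. a randomly generated scale
    float fp16Scale = static_cast<_Float16>(scale);  // round-trip through FP16
    std::printf("original scale: %.9f, FP16-representable scale: %.9f\n",
                static_cast<double>(scale), static_cast<double>(fp16Scale));
    return 0;
}

This matches the comment in the constructor: when casting to or from TENSOR_FLOAT16, giving the other operand an FP16-representable scale avoids precision loss in the scale itself.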
diff --git a/nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp b/nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp
new file mode 100644
index 000000000..24ba83438
--- /dev/null
+++ b/nn/runtime/test/generated/spec_V1_3_cts_only/cast_mismatching_shapes.example.cpp
@@ -0,0 +1,53 @@
+// Generated from cast_mismatching_shapes.mod.py
+// DO NOT EDIT
+// clang-format off
+#include "TestHarness.h"
+using namespace test_helper;
+
+namespace generated_tests::cast_mismatching_shapes {
+
+const TestModel& get_test_model() {
+ static TestModel model = {
+ .expectFailure = true,
+ .expectedMultinomialDistributionTolerance = 0,
+ .isRelaxed = false,
+ .main = {
+ .inputIndexes = {0},
+ .operands = {{ // input0
+ .channelQuant = {},
+ .data = TestBuffer::createFromVector<int32_t>({1, 2, 3, 4, 5, 6}),
+ .dimensions = {2, 3},
+ .isIgnored = false,
+ .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .type = TestOperandType::TENSOR_INT32,
+ .zeroPoint = 0
+ }, { // output0
+ .channelQuant = {},
+ .data = TestBuffer::createFromVector<int32_t>({1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
+ .dimensions = {100},
+ .isIgnored = false,
+ .lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT,
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .type = TestOperandType::TENSOR_INT32,
+ .zeroPoint = 0
+ }},
+ .operations = {{
+ .inputs = {0},
+ .outputs = {1},
+ .type = TestOperationType::CAST
+ }},
+ .outputIndexes = {1}
+ },
+ .minSupportedVersion = TestHalVersion::UNKNOWN,
+ .referenced = {}
+ };
+ return model;
+}
+
+const auto dummy_test_model = TestModelManager::get().add("cast_mismatching_shapes", get_test_model());
+
+} // namespace generated_tests::cast_mismatching_shapes
+
diff --git a/nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py b/nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py
new file mode 100644
index 000000000..c718e5cc2
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3_cts_only/cast_mismatching_shapes.mod.py
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_INT32", "{2, 3}")
+output0 = Output("output0", "TENSOR_INT32", "{100}")
+
+model = Model().Operation("CAST", input0).To(output0)
+
+example = Example({
+ input0: [1, 2, 3, 4, 5, 6],
+ output0: [1, 2, 3, 4, 5, 6] + [0] * 94,
+}, model=model).ExpectFailure()
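The new spec is a negative test: the CAST output shape {100} deliberately disagrees with the input shape {2, 3}, and ExpectFailure() marks the generated model with expectFailure = true (visible in the generated .example.cpp above). A minimal sketch of the shape check this exercises, using a hypothetical helper name rather than the runtime's actual validator:

// Hypothetical helper, for illustration only: CAST is expected to reject an
// output whose shape differs from the input's, e.g. {2, 3} vs {100}.
#include <cstdint>
#include <vector>

bool castShapesMatch(const std::vector<uint32_t>& inputDims,
                     const std::vector<uint32_t>& outputDims) {
    return inputDims == outputDims;  // {2, 3} vs {100} -> false, so validation fails
}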