summaryrefslogtreecommitdiff
path: root/nn/runtime/test/fuzzing
diff options
context:
space:
mode:
authorXusong Wang <xusongw@google.com>2020-03-06 16:21:56 -0800
committerXusong Wang <xusongw@google.com>2020-03-09 16:09:28 -0700
commit1120efcc605bdf7b50c14c89683591400b5801e8 (patch)
treea469062490ef7d6fc2aa1e395b075406fadb4ec2 /nn/runtime/test/fuzzing
parent4bceb192ebcb6f9e395d2094af7c4b18a9404384 (diff)
downloadml-1120efcc605bdf7b50c14c89683591400b5801e8.tar.gz
Add new operand types to RGG.
NNAPI 1.3 introduces the following new types to the existing operations: - Add TENSOR_QUANT8_ASYMM_SIGNED to all supported operations - Add TENSOR_INT32 to all supported operations - Add CAST with input/output of the same type Bug: 141704517 Test: NNT_static_fuzzing Change-Id: I2feb5542fcaffa4a78880d66f71d142f2bf47132
Diffstat (limited to 'nn/runtime/test/fuzzing')
-rw-r--r--nn/runtime/test/fuzzing/TestRandomGraph.cpp71
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp122
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp25
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp47
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp565
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp29
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp10
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp37
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h52
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp4
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp31
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp489
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Resize.cpp5
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Selection.cpp136
14 files changed, 963 insertions, 660 deletions
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 702f1d5e5..4b051502f 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -428,6 +428,7 @@ const AccuracyCriteria kStrictCriteria = {
.float16 = {.bias = 1e-4f, .mse = 1e-8f, .atol = 1e-3f, .rtol = 1e-3f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
+ .quant8AsymmSigned = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
.quant8Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
.quant16Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
.quant16Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
@@ -441,6 +442,7 @@ const AccuracyCriteria kMediumCriteria = {
.float16 = {.bias = 1e-3f, .mse = 1e-6f, .atol = 1e-2f, .rtol = 1e-2f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
+ .quant8AsymmSigned = {.bias = 1.2, .mse = 1.2, .atol = 2},
.quant8Symm = {.bias = 1.2, .mse = 1.2, .atol = 2},
.quant16Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
.quant16Symm = {.bias = 1.2, .mse = 1.2, .atol = 2},
@@ -454,6 +456,7 @@ const AccuracyCriteria kRelaxedCriteria = {
.float16 = {.bias = 5e-3f, .mse = 1e-3f, .atol = 1.0f, .rtol = 1.0f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
+ .quant8AsymmSigned = {.bias = 1.5, .mse = 1.5, .atol = 10},
.quant8Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
.quant16Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
.quant16Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
@@ -465,6 +468,7 @@ const AccuracyCriteria kConvCriteria = {
.float16 = {.bias = 5e-2f, .mse = 1e-2f, .atol = 1.0f, .rtol = 1.0f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
+ .quant8AsymmSigned = {.bias = 1.5, .mse = 1.5, .atol = 10},
.quant8Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
.quant16Asymm = {.bias = 1.5, .mse = 1.5, .atol = 10},
.quant16Symm = {.bias = 1.5, .mse = 1.5, .atol = 10},
@@ -550,6 +554,7 @@ TEST_SINGLE_OPERATION(LOCAL_RESPONSE_NORMALIZATION, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(DEQUANTIZE, V1_2, kMediumCriteria);
TEST_SINGLE_OPERATION(SQUEEZE, V1_2, kStrictCriteria);
TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_2, kStrictCriteria);
+TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_2, kStrictCriteria);
/*-- NNAPI 1.2 Operations ---------------------------------------------------*/
@@ -613,6 +618,70 @@ TEST_SINGLE_OPERATION(ROI_ALIGN, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(ROI_POOLING, V1_2, kRelaxedCriteria);
TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_2, kRelaxedCriteria);
+/*-- NNAPI 1.0, 1.1, and 1.2 Operations with Extended Behavior in 1.3 -------------*/
+
+TEST_SINGLE_OPERATION(ADD, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(AVERAGE_POOL_2D, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(CONCATENATION, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(CONV_2D, V1_3, kConvCriteria);
+TEST_SINGLE_OPERATION(DEPTHWISE_CONV_2D, V1_3, kConvCriteria);
+TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(DEQUANTIZE, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(EMBEDDING_LOOKUP, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(L2_NORMALIZATION, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(LOGISTIC, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(MAX_POOL_2D, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(MUL, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU1, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(RELU6, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(RESHAPE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(RESIZE_BILINEAR, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SOFTMAX, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(TANH, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(DIV, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(MEAN, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(PAD, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(SQUEEZE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(STRIDED_SLICE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(SUB, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(TRANSPOSE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(ABS, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(ARGMAX, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(ARGMIN, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(CAST, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(CHANNEL_SHUFFLE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(EQUAL, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(EXPAND_DIMS, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(GATHER, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(GREATER, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(GREATER_EQUAL, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(GROUPED_CONV_2D, V1_3, kConvCriteria);
+TEST_SINGLE_OPERATION(HEATMAP_MAX_KEYPOINT, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(LESS, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(LESS_EQUAL, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(MAXIMUM, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(MINIMUM, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(NOT_EQUAL, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(PAD_V2, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(PRELU, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(QUANTIZE, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(REDUCE_MAX, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(REDUCE_MIN, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(ROI_ALIGN, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(ROI_POOLING, V1_3, kRelaxedCriteria);
+TEST_SINGLE_OPERATION(SELECT, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(SLICE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(SPLIT, V1_3, kMediumCriteria);
+TEST_SINGLE_OPERATION(TILE, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(TOPK_V2, V1_3, kStrictCriteria);
+TEST_SINGLE_OPERATION(TRANSPOSE_CONV_2D, V1_3, kConvCriteria);
+TEST_SINGLE_OPERATION(RESIZE_NEAREST_NEIGHBOR, V1_3, kRelaxedCriteria);
+
/*-- NNAPI 1.3 Operations ---------------------------------------------------*/
// TODO: The following 1.3 operation signatures are currently not defined:
@@ -630,6 +699,7 @@ const AccuracyCriteria kSmallGraphCriteria = {
.float16 = {.bias = 5e-2f, .mse = 1e-2f, .atol = 1.0f, .rtol = 1.0f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 2, .mse = 2, .atol = 12},
+ .quant8AsymmSigned = {.bias = 2, .mse = 2, .atol = 12},
.quant8Symm = {.bias = 2, .mse = 2, .atol = 12},
.quant16Asymm = {.bias = 2, .mse = 2, .atol = 12},
.quant16Symm = {.bias = 2, .mse = 2, .atol = 12},
@@ -640,6 +710,7 @@ const AccuracyCriteria kLargeGraphCriteria = {
.float16 = {.bias = 1e-1f, .mse = 5e-2f, .atol = 1.0f, .rtol = 1.0f},
.int32 = {.atol = 1},
.quant8Asymm = {.bias = 2, .mse = 2, .atol = 12},
+ .quant8AsymmSigned = {.bias = 2, .mse = 2, .atol = 12},
.quant8Symm = {.bias = 2, .mse = 2, .atol = 12},
.quant16Asymm = {.bias = 2, .mse = 2, .atol = 12},
.quant16Symm = {.bias = 2, .mse = 2, .atol = 12},
diff --git a/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp b/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
index 65271b571..6c776416f 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
@@ -25,7 +25,7 @@ namespace fuzzing_test {
static void roiTensorConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) {
op->dataType = dataType;
- if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
+ if (isQuantizedType(dataType)) {
op->dataType = TestOperandType::TENSOR_QUANT16_ASYMM;
op->scale = 0.125f;
op->zeroPoint = 0;
@@ -120,49 +120,57 @@ static void roiFinalizer(RandomOperation* op) {
// accuracy evaluation.
// 2. There is no actual graph that uses this data type on bounding boxes.
-DEFINE_OPERATION_SIGNATURE(ROI_ALIGN_V1_2){
- .opType = TestOperationType::ROI_ALIGN,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- kInputRoiTensor,
- PARAMETER_NONE(TestOperandType::TENSOR_INT32),
- RANDOM_INT_FREE,
- RANDOM_INT_FREE,
- PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
- PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
- PARAMETER_RANGE(TestOperandType::INT32, 0, 10),
- PARAMETER_RANGE(TestOperandType::INT32, 0, 10),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = roiConstructor,
- .finalizer = roiFinalizer};
-
-DEFINE_OPERATION_SIGNATURE(ROI_POOLING_V1_2){
- .opType = TestOperationType::ROI_POOLING,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- kInputRoiTensor,
- PARAMETER_NONE(TestOperandType::TENSOR_INT32),
- RANDOM_INT_FREE,
- RANDOM_INT_FREE,
- PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
- PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = roiConstructor,
- .finalizer = roiFinalizer};
+#define DEFINE_ROI_ALIGN_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(ROI_ALIGN_##ver){ \
+ .opType = TestOperationType::ROI_ALIGN, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ kInputRoiTensor, \
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ RANDOM_INT_FREE, \
+ RANDOM_INT_FREE, \
+ PARAMETER_FLOAT_RANGE(0.1f, 10.0f), \
+ PARAMETER_FLOAT_RANGE(0.1f, 10.0f), \
+ PARAMETER_RANGE(TestOperandType::INT32, 0, 10), \
+ PARAMETER_RANGE(TestOperandType::INT32, 0, 10), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = roiConstructor, \
+ .finalizer = roiFinalizer};
+
+DEFINE_ROI_ALIGN_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ROI_ALIGN_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+#define DEFINE_ROI_POOLING_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(ROI_POOLING_##ver){ \
+ .opType = TestOperationType::ROI_POOLING, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ kInputRoiTensor, \
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ RANDOM_INT_FREE, \
+ RANDOM_INT_FREE, \
+ PARAMETER_FLOAT_RANGE(0.1f, 10.0f), \
+ PARAMETER_FLOAT_RANGE(0.1f, 10.0f), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = roiConstructor, \
+ .finalizer = roiFinalizer};
+
+DEFINE_ROI_POOLING_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ROI_POOLING_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void heatmapMaxKeypointConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -206,17 +214,21 @@ static void heatmapMaxKeypointFinalizer(RandomOperation* op) {
}
}
-DEFINE_OPERATION_SIGNATURE(HEATMAP_MAX_KEYPOINT_V1_2){
- .opType = TestOperationType::HEATMAP_MAX_KEYPOINT,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, kInputRoiTensor,
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
- .outputs = {OUTPUT_DEFAULT, kOutputRoiTensor},
- .constructor = heatmapMaxKeypointConstructor,
- .finalizer = heatmapMaxKeypointFinalizer};
+#define DEFINE_HEATMAP_MAX_KEYPOINT_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(HEATMAP_MAX_KEYPOINT_##ver){ \
+ .opType = TestOperationType::HEATMAP_MAX_KEYPOINT, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, kInputRoiTensor, \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
+ .outputs = {OUTPUT_DEFAULT, kOutputRoiTensor}, \
+ .constructor = heatmapMaxKeypointConstructor, \
+ .finalizer = heatmapMaxKeypointFinalizer};
+
+DEFINE_HEATMAP_MAX_KEYPOINT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_HEATMAP_MAX_KEYPOINT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
index 0ecba0439..babae5d15 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
@@ -43,7 +43,7 @@ static void broadcastOpConstructor(TestOperandType dataType, uint32_t rank, Rand
}
// MUL requires output.scale > input0.scale * input1.scale.
- if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM && op->opType == TestOperationType::MUL) {
+ if (isQuantizedType(dataType) && op->opType == TestOperationType::MUL) {
float minScale = op->inputs[0]->scale * op->inputs[1]->scale;
op->outputs[0]->scale = getUniform(minScale, minScale * 5);
}
@@ -53,6 +53,13 @@ static void broadcastOpConstructor(TestOperandType dataType, uint32_t rank, Rand
if (op->opType == TestOperationType::DIV || op->opType == TestOperationType::POW) {
op->outputs[0]->doNotConnect = true;
}
+
+ // For ADD/MUL/SUB/DIV with TENSOR_INT32 tensors, the activation must be "NONE".
+ if ((op->opType == TestOperationType::ADD || op->opType == TestOperationType::MUL ||
+ op->opType == TestOperationType::SUB || op->opType == TestOperationType::DIV) &&
+ dataType == TestOperandType::TENSOR_INT32) {
+ op->inputs[2]->setScalarValue(0);
+ }
}
// For broadcast operations with fused activation.
@@ -79,6 +86,13 @@ DEFINE_BROADCAST_WITH_ACT_SIGNATURE(MUL, V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_BROADCAST_WITH_ACT_SIGNATURE(SUB, V1_2, TestOperandType::TENSOR_FLOAT16,
TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(ADD, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_INT32);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(MUL, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_INT32);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(SUB, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_INT32);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_3, TestOperandType::TENSOR_INT32);
// For broadcast ops with output of the same data type as inputs.
#define DEFINE_BROADCAST_SIGNATURE(op, ver, ...) \
@@ -101,6 +115,9 @@ DEFINE_BROADCAST_SIGNATURE(MAXIMUM, V1_2, TestOperandType::TENSOR_FLOAT32,
DEFINE_BROADCAST_SIGNATURE(MINIMUM, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
TestOperandType::TENSOR_INT32);
+DEFINE_BROADCAST_SIGNATURE(PRELU, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_BROADCAST_SIGNATURE(MAXIMUM, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_BROADCAST_SIGNATURE(MINIMUM, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
// Logical
DEFINE_BROADCAST_SIGNATURE(LOGICAL_AND, V1_2, TestOperandType::TENSOR_BOOL8);
@@ -135,6 +152,12 @@ DEFINE_COMPARISON_SIGNATURE(LESS_EQUAL, V1_2, TestOperandType::TENSOR_FLOAT32,
DEFINE_COMPARISON_SIGNATURE(NOT_EQUAL, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_BOOL8);
+DEFINE_COMPARISON_SIGNATURE(EQUAL, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_COMPARISON_SIGNATURE(GREATER, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_COMPARISON_SIGNATURE(GREATER_EQUAL, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_COMPARISON_SIGNATURE(LESS, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_COMPARISON_SIGNATURE(LESS_EQUAL, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_COMPARISON_SIGNATURE(NOT_EQUAL, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp b/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
index 8d04c784b..22020cfc3 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
@@ -95,6 +95,29 @@ DEFINE_OPERATION_SIGNATURE(CONCAT_3_V1_2){
concatConstructor(/*numInputs=*/3, /*isV1_0=*/false, rank, op);
}};
+DEFINE_OPERATION_SIGNATURE(CONCAT_2_V1_3){
+ .opType = TestOperationType::CONCATENATION,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
+ concatConstructor(/*numInputs=*/2, /*isV1_0=*/false, rank, op);
+ }};
+
+DEFINE_OPERATION_SIGNATURE(CONCAT_3_V1_3){
+ .opType = TestOperationType::CONCATENATION,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_DEFAULT,
+ PARAMETER_NONE(TestOperandType::INT32)},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
+ concatConstructor(/*numInputs=*/3, /*isV1_0=*/false, rank, op);
+ }};
+
// SPLIT with fixed number of splits.
static void splitConstructor(uint32_t numSplits, uint32_t rank, RandomOperation* op) {
int32_t axis = getUniform<int32_t>(-rank, rank - 1);
@@ -145,6 +168,30 @@ DEFINE_OPERATION_SIGNATURE(SPLIT_3_V1_2){
splitConstructor(/*numSplits=*/3, rank, op);
}};
+DEFINE_OPERATION_SIGNATURE(SPLIT_2_V1_3){
+ .opType = TestOperationType::SPLIT,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_CHOICE(TestOperandType::INT32, 2)},
+ .outputs = {OUTPUT_DEFAULT, OUTPUT_DEFAULT},
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
+ splitConstructor(/*numSplits=*/2, rank, op);
+ }};
+
+DEFINE_OPERATION_SIGNATURE(SPLIT_3_V1_3){
+ .opType = TestOperationType::SPLIT,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_CHOICE(TestOperandType::INT32, 3)},
+ .outputs = {OUTPUT_DEFAULT, OUTPUT_DEFAULT, OUTPUT_DEFAULT},
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
+ splitConstructor(/*numSplits=*/3, rank, op);
+ }};
+
} // namespace fuzzing_test
} // namespace nn
} // namespace android
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp b/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp
index df2d048ec..d8ab4e4ba 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp
@@ -170,100 +170,96 @@ DEFINE_CONV_2D_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_CONV_2D_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16,
TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_CONV_2D_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
-DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_layout_V1_2){
- .opType = TestOperationType::CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(conv2DExplicitConstructor, _1, _2, TestHalVersion::V1_2, _3)};
-
-DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_layout_V1_2){
- .opType = TestOperationType::CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(conv2DImplicitConstructor, _1, _2, TestHalVersion::V1_2, _3)};
-
-DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_dilation_V1_2){
- .opType = TestOperationType::CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(conv2DExplicitConstructor, _1, _2, TestHalVersion::V1_2, _3)};
-
-DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_dilation_V1_2){
- .opType = TestOperationType::CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(conv2DImplicitConstructor, _1, _2, TestHalVersion::V1_2, _3)};
+#define DEFINE_CONV_2D_WITH_LAYOUT_OR_DILATION_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_layout_##ver){ \
+ .opType = TestOperationType::CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(conv2DExplicitConstructor, _1, _2, TestHalVersion::ver, _3)}; \
+ DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_layout_##ver){ \
+ .opType = TestOperationType::CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(conv2DImplicitConstructor, _1, _2, TestHalVersion::ver, _3)}; \
+ DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_dilation_##ver){ \
+ .opType = TestOperationType::CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(conv2DExplicitConstructor, _1, _2, TestHalVersion::ver, _3)}; \
+ DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_dilation_##ver){ \
+ .opType = TestOperationType::CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(conv2DImplicitConstructor, _1, _2, TestHalVersion::ver, _3)};
+
+DEFINE_CONV_2D_WITH_LAYOUT_OR_DILATION_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_CONV_2D_WITH_LAYOUT_OR_DILATION_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void depthwiseConv2DExplicitConstructor(TestOperandType, uint32_t rank, TestHalVersion ver,
RandomOperation* op) {
@@ -420,108 +416,105 @@ DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16,
TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
-DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_layout_V1_2){
- .opType = TestOperationType::DEPTHWISE_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(depthwiseConv2DExplicitConstructor, _1, _2, TestHalVersion::V1_2,
- _3)};
-
-DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_layout_V1_2){
- .opType = TestOperationType::DEPTHWISE_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(depthwiseConv2DImplicitConstructor, _1, _2, TestHalVersion::V1_2,
- _3)};
-
-DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_dilation_V1_2){
- .opType = TestOperationType::DEPTHWISE_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(depthwiseConv2DExplicitConstructor, _1, _2, TestHalVersion::V1_2,
- _3)};
-
-DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_dilation_V1_2){
- .opType = TestOperationType::DEPTHWISE_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = std::bind(depthwiseConv2DImplicitConstructor, _1, _2, TestHalVersion::V1_2,
- _3)};
+#define DEFINE_DEPTHWISE_CONV_2D_WITH_LAYOUT_OR_DILATION_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_layout_##ver){ \
+ .opType = TestOperationType::DEPTHWISE_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 5), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(depthwiseConv2DExplicitConstructor, _1, _2, \
+ TestHalVersion::ver, _3)}; \
+ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_layout_##ver){ \
+ .opType = TestOperationType::DEPTHWISE_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 5), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(depthwiseConv2DImplicitConstructor, _1, _2, \
+ TestHalVersion::ver, _3)}; \
+ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_dilation_##ver){ \
+ .opType = TestOperationType::DEPTHWISE_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 5), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(depthwiseConv2DExplicitConstructor, _1, _2, \
+ TestHalVersion::ver, _3)}; \
+ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_dilation_##ver){ \
+ .opType = TestOperationType::DEPTHWISE_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 5), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = std::bind(depthwiseConv2DImplicitConstructor, _1, _2, \
+ TestHalVersion::ver, _3)};
+
+DEFINE_DEPTHWISE_CONV_2D_WITH_LAYOUT_OR_DILATION_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_DEPTHWISE_CONV_2D_WITH_LAYOUT_OR_DILATION_SIGNATURE(
+ V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void groupedConv2DExplicitConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -628,52 +621,53 @@ static void groupedConv2DImplicitConstructor(TestOperandType, uint32_t rank, Ran
setConvFCScale(/*applyOutputScaleBound=*/false, op);
}
-DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_explicit_V1_2){
- .opType = TestOperationType::GROUPED_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = groupedConv2DExplicitConstructor};
-
-DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_implicit_V1_2){
- .opType = TestOperationType::GROUPED_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = groupedConv2DImplicitConstructor};
+#define DEFINE_GROUPED_CONV_2D_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_explicit_##ver){ \
+ .opType = TestOperationType::GROUPED_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 5), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = groupedConv2DExplicitConstructor}; \
+ DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_implicit_##ver){ \
+ .opType = TestOperationType::GROUPED_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 5), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = groupedConv2DImplicitConstructor};
+
+DEFINE_GROUPED_CONV_2D_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_GROUPED_CONV_2D_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void transposeConv2DExplicitConstructor(TestOperandType, uint32_t rank,
RandomOperation* op) {
@@ -764,51 +758,52 @@ static void transposeConv2DImplicitConstructor(TestOperandType, uint32_t rank,
setConvFCScale(/*applyOutputScaleBound=*/false, op);
}
-DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_explicit_V1_2){
- .opType = TestOperationType::TRANSPOSE_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = transposeConv2DExplicitConstructor};
-
-DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_implicit_V1_2){
- .opType = TestOperationType::TRANSPOSE_CONV_2D,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs =
- {
- INPUT_DEFAULT,
- INPUT_DEFAULT,
- INPUT_BIAS,
- PARAMETER_NONE(TestOperandType::TENSOR_INT32),
- PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
- },
- .outputs = {OUTPUT_DEFAULT},
- .constructor = transposeConv2DImplicitConstructor};
+#define DEFINE_TRANSPOSE_CONV_2D_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_explicit_##ver){ \
+ .opType = TestOperationType::TRANSPOSE_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = transposeConv2DExplicitConstructor}; \
+ DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_implicit_##ver){ \
+ .opType = TestOperationType::TRANSPOSE_CONV_2D, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ INPUT_DEFAULT, \
+ INPUT_BIAS, \
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = transposeConv2DImplicitConstructor};
+
+DEFINE_TRANSPOSE_CONV_2D_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_TRANSPOSE_CONV_2D_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
index d55b7fa6d..1c07497b0 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
@@ -42,6 +42,9 @@ DEFINE_ELEMENTWISE_SIGNATURE(LOGISTIC, V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE(RELU, V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE(RELU1, V1_2, TestOperandType::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU1, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
DEFINE_ELEMENTWISE_SIGNATURE(HARD_SWISH, V1_3, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM);
@@ -64,6 +67,7 @@ DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(NEG, V1_2, TestOperandType::TENSOR_FLOAT
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SIN, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOGICAL_NOT, V1_2, TestOperandType::TENSOR_BOOL8);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(ABS, V1_3, TestOperandType::TENSOR_INT32);
// LOG, SQRT, and RSQRT may produce NaN output values. We should not connect the output tensor to
// the input of another operation.
@@ -105,6 +109,11 @@ DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(LOGISTIC, V1_0, /*scale=*/1.f / 2
DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(TANH, V1_2, /*scale=*/1.f / 128, /*zeroPoint=*/128,
TestOperandType::TENSOR_FLOAT16,
TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(LOGISTIC, V1_3, /*scale=*/1.f / 256,
+ /*zeroPoint=*/-128,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(TANH, V1_3, /*scale=*/1.f / 128, /*zeroPoint=*/0,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
// Operations with output data type different from input.
#define DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(op, ver, outType, ...) \
@@ -127,10 +136,21 @@ DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TEN
TestOperandType::TENSOR_QUANT8_ASYMM,
TestOperandType::TENSOR_QUANT8_SYMM);
+DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_3, /*outType=*/TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_2, /*outType=*/TENSOR_QUANT8_ASYMM,
TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_3,
+ /*outType=*/TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+
#define DEFINE_CAST_SIGNATURE(ver, outType, ...) \
DEFINE_OPERATION_SIGNATURE(CAST_##outType##_##ver){ \
.opType = TestOperationType::CAST, \
@@ -157,6 +177,15 @@ DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_INT32, TestOperandType::TENSOR_FL
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
TestOperandType::TENSOR_INT32);
+DEFINE_CAST_SIGNATURE(V1_3, /*outType=*/TENSOR_BOOL8, TestOperandType::TENSOR_BOOL8);
+DEFINE_CAST_SIGNATURE(V1_3, /*outType=*/TENSOR_INT32, TestOperandType::TENSOR_INT32);
+DEFINE_CAST_SIGNATURE(V1_3, /*outType=*/TENSOR_QUANT16_ASYMM,
+ TestOperandType::TENSOR_QUANT16_ASYMM);
+DEFINE_CAST_SIGNATURE(V1_3, /*outType=*/TENSOR_QUANT16_SYMM, TestOperandType::TENSOR_QUANT16_SYMM);
+DEFINE_CAST_SIGNATURE(V1_3, /*outType=*/TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_CAST_SIGNATURE(V1_3, /*outType=*/TENSOR_QUANT8_SYMM, TestOperandType::TENSOR_QUANT8_SYMM);
+
DEFINE_OPERATION_SIGNATURE(ELU_V1_3){
.opType = TestOperationType::ELU,
.supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
diff --git a/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp b/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp
index 1cb850fd7..ad08d409e 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp
@@ -67,6 +67,16 @@ DEFINE_OPERATION_SIGNATURE(signature_FULLY_CONNECTED_V1_2){
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(fullyConnectedConstructor, _1, _2, TestHalVersion::V1_2, _3)};
+DEFINE_OPERATION_SIGNATURE(signature_FULLY_CONNECTED_V1_3){
+ .opType = TestOperationType::FULLY_CONNECTED,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_BIAS,
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3)},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = std::bind(fullyConnectedConstructor, _1, _2, TestHalVersion::V1_3, _3)};
+
} // namespace fuzzing_test
} // namespace nn
} // namespace android
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
index 165776269..89ba36040 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
@@ -69,6 +69,25 @@ DEFINE_OPERATION_SIGNATURE(SOFTMAX_axis_V1_2){
.outputs = {OUTPUT_QUANT(1.f / 256, 0)},
.constructor = softmaxConstructor};
+DEFINE_OPERATION_SIGNATURE(SOFTMAX_V1_3){
+ .opType = TestOperationType::SOFTMAX,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0)},
+ .outputs = {OUTPUT_QUANT(1.f / 256, -128)},
+ .constructor = softmaxConstructor};
+
+DEFINE_OPERATION_SIGNATURE(SOFTMAX_axis_V1_3){
+ .opType = TestOperationType::SOFTMAX,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0),
+ PARAMETER_NONE(TestOperandType::INT32)},
+ .outputs = {OUTPUT_QUANT(1.f / 256, -128)},
+ .constructor = softmaxConstructor};
+
static void l2normConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
sameDimensionOpConstructor(dataType, rank, op);
// Generate value for "axis" parameter.
@@ -118,6 +137,24 @@ DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_axis_V1_2){
.outputs = {OUTPUT_QUANT(1.f / 128, 128)},
.constructor = l2normConstructor};
+DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_3){
+ .opType = TestOperationType::L2_NORMALIZATION,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT},
+ .outputs = {OUTPUT_QUANT(1.f / 128, 0)},
+ .constructor = l2normConstructor};
+
+DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_axis_V1_3){
+ .opType = TestOperationType::L2_NORMALIZATION,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
+ .outputs = {OUTPUT_QUANT(1.f / 128, 0)},
+ .constructor = l2normConstructor};
+
static void localResponseNormConstructor(TestOperandType dataType, uint32_t rank,
RandomOperation* op) {
sameDimensionOpConstructor(dataType, rank, op);
diff --git a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
index 1dd14fc4d..fbeb96a89 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
+++ b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -64,6 +64,10 @@ struct CppType<TestOperandType::TENSOR_QUANT8_SYMM> {
using type = int8_t;
};
template <>
+struct CppType<TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED> {
+ using type = int8_t;
+};
+template <>
struct CppType<TestOperandType::TENSOR_QUANT16_ASYMM> {
using type = uint16_t;
};
@@ -123,8 +127,17 @@ inline void uniformFinalizer(RandomOperand* op) {
case TestOperandType::TENSOR_QUANT8_ASYMM:
uniform<uint8_t>(0, 255, op);
break;
+ case TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ uniform<int8_t>(-128, 127, op);
+ break;
case TestOperandType::TENSOR_QUANT8_SYMM:
- uniform<uint8_t>(-128, 127, op);
+ uniform<int8_t>(-128, 127, op);
+ break;
+ case TestOperandType::TENSOR_QUANT16_ASYMM:
+ uniform<uint16_t>(0, 65535, op);
+ break;
+ case TestOperandType::TENSOR_QUANT16_SYMM:
+ uniform<int16_t>(-32768, 32767, op);
break;
case TestOperandType::TENSOR_BOOL8:
uniform<bool8>(true, false, op);
@@ -211,7 +224,7 @@ inline void setFreeDimensions(const std::shared_ptr<RandomOperand>& op, uint32_t
}
inline void setConvFCScale(bool applyOutputScaleBound, RandomOperation* op) {
- if (op->inputs[0]->dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
+ if (isQuantizedType(op->inputs[0]->dataType)) {
float biasScale = op->inputs[0]->scale * op->inputs[1]->scale;
op->inputs[2]->scale = biasScale;
if (applyOutputScaleBound) {
@@ -237,9 +250,18 @@ inline void defaultOperandConstructor(TestOperandType dataType, uint32_t, Random
if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
op->scale = getUniform<float>(0.1, 2.0);
op->zeroPoint = getUniform<int32_t>(0, 255);
+ } else if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+ op->scale = getUniform<float>(0.1, 2.0);
+ op->zeroPoint = getUniform<int32_t>(-128, 127);
} else if (dataType == TestOperandType::TENSOR_QUANT8_SYMM) {
op->scale = getUniform<float>(0.1, 2.0);
op->zeroPoint = 0;
+ } else if (dataType == TestOperandType::TENSOR_QUANT16_ASYMM) {
+ op->scale = getUniform<float>(0.1, 2.0);
+ op->zeroPoint = getUniform<int32_t>(0, 65535);
+ } else if (dataType == TestOperandType::TENSOR_QUANT16_SYMM) {
+ op->scale = getUniform<float>(0.1, 2.0);
+ op->zeroPoint = 0;
} else {
op->scale = 0.0f;
op->zeroPoint = 0;
@@ -300,17 +322,18 @@ inline void defaultScalarOperandConstructor(TestOperandType dataType, uint32_t,
// An INPUT operand with uniformly distributed buffer values. The operand's data type is set to
// TENSOR_INT32 if the operation's primary data type is TENSOR_QUANT8_ASYMM. Otherwise, it is the
// same as INPUT_DEFAULT.
-#define INPUT_BIAS \
- { \
- .type = RandomOperandType::INPUT, \
- .constructor = \
- [](TestOperandType dataType, uint32_t rank, RandomOperand* op) { \
- if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) { \
- dataType = TestOperandType::TENSOR_INT32; \
- } \
- defaultOperandConstructor(dataType, rank, op); \
- }, \
- .finalizer = uniformFinalizer \
+#define INPUT_BIAS \
+ { \
+ .type = RandomOperandType::INPUT, \
+ .constructor = \
+ [](TestOperandType dataType, uint32_t rank, RandomOperand* op) { \
+ if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM || \
+ dataType == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) { \
+ dataType = TestOperandType::TENSOR_INT32; \
+ } \
+ defaultOperandConstructor(dataType, rank, op); \
+ }, \
+ .finalizer = uniformFinalizer \
}
// A helper macro for common code block filling operand buffer with random method.
@@ -415,8 +438,7 @@ inline void defaultScalarOperandConstructor(TestOperandType dataType, uint32_t,
.type = RandomOperandType::OUTPUT, \
.constructor = [](TestOperandType dataType, uint32_t rank, RandomOperand* op) { \
defaultOperandConstructor(dataType, rank, op); \
- if (op->dataType == TestOperandType::TENSOR_QUANT8_ASYMM || \
- dataType == TestOperandType::TENSOR_QUANT8_SYMM) { \
+ if (isQuantizedType(op->dataType)) { \
op->scale = (fixedScale); \
op->zeroPoint = (fixedZeroPoint); \
} \
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp b/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
index 008f0ce67..dd0935a99 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
@@ -196,6 +196,10 @@ DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(L2_POOL_2D, V1_2, TestOperandType::TENSOR_F
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16,
TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(AVERAGE_POOL_2D, V1_3,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_3,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
index 7c5ae4e29..a3bad3578 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
@@ -73,6 +73,7 @@ static void reduceOpConstructor(TestOperandType, uint32_t rank, RandomOperation*
DEFINE_MEAN_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_MEAN_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_MEAN_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
#define DEFINE_REDUCE_SIGNATURE(op, ver, ...) \
DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
@@ -95,6 +96,8 @@ DEFINE_REDUCE_SIGNATURE(REDUCE_MAX, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_REDUCE_SIGNATURE(REDUCE_MIN, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_REDUCE_SIGNATURE(REDUCE_MAX, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_REDUCE_SIGNATURE(REDUCE_MIN, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void singleAxisReduceOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
@@ -109,20 +112,24 @@ static void singleAxisReduceOpConstructor(TestOperandType, uint32_t rank, Random
}
}
-#define DEFINE_ARGMIN_MAX_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
- .opType = TestOperationType::op, \
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, \
- TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32, \
- TestOperandType::TENSOR_QUANT8_ASYMM}, \
- .supportedRanks = {1, 2, 3, 4, 5}, \
- .version = TestHalVersion::ver, \
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)}, \
- .outputs = {OUTPUT_TYPED(TestOperandType::TENSOR_INT32)}, \
+#define DEFINE_ARGMIN_MAX_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
+ .opType = TestOperationType::op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4, 5}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::TENSOR_INT32)}, \
.constructor = singleAxisReduceOpConstructor};
-DEFINE_ARGMIN_MAX_SIGNATURE(ARGMAX, V1_2);
-DEFINE_ARGMIN_MAX_SIGNATURE(ARGMIN, V1_2);
+DEFINE_ARGMIN_MAX_SIGNATURE(ARGMAX, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ARGMIN_MAX_SIGNATURE(ARGMIN, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ARGMIN_MAX_SIGNATURE(ARGMAX, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_ARGMIN_MAX_SIGNATURE(ARGMIN, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp
index e8c5d1ab8..419db3a9a 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp
@@ -47,36 +47,36 @@ static void spaceToDepthConstructor(TestOperandType, uint32_t rank, RandomOperat
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_V1_0){
- .opType = TestOperationType::SPACE_TO_DEPTH,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = spaceToDepthConstructor};
-
-DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_V1_2){
- .opType = TestOperationType::SPACE_TO_DEPTH,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = spaceToDepthConstructor};
-
-DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_layout_V1_2){
- .opType = TestOperationType::SPACE_TO_DEPTH,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = spaceToDepthConstructor};
+#define DEFINE_SPACE_TO_DEPTH_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_##ver){ \
+ .opType = TestOperationType::SPACE_TO_DEPTH, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = spaceToDepthConstructor};
+
+DEFINE_SPACE_TO_DEPTH_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_SPACE_TO_DEPTH_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_SPACE_TO_DEPTH_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+#define DEFINE_SPACE_TO_DEPTH_WITH_LAYOUT_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_layout_##ver){ \
+ .opType = TestOperationType::SPACE_TO_DEPTH, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = spaceToDepthConstructor};
+
+DEFINE_SPACE_TO_DEPTH_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_SPACE_TO_DEPTH_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void depthToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -102,36 +102,37 @@ static void depthToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperat
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_V1_0){
- .opType = TestOperationType::DEPTH_TO_SPACE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = depthToSpaceConstructor};
-
-DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_V1_2){
- .opType = TestOperationType::DEPTH_TO_SPACE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = depthToSpaceConstructor};
-
-DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_layout_V1_2){
- .opType = TestOperationType::DEPTH_TO_SPACE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = depthToSpaceConstructor};
+#define DEFINE_DEPTH_TO_SPACE_SIGNATURE(ver, ...)                                 \
+    DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_##ver){                             \
+            .opType = TestOperationType::DEPTH_TO_SPACE,                          \
+            .supportedDataTypes = {__VA_ARGS__},                                  \
+            .supportedRanks = {4},                                                \
+            .version = TestHalVersion::ver,                                       \
+            .inputs = {INPUT_DEFAULT,                                             \
+                       PARAMETER_RANGE(TestOperandType::INT32, 1, 3)},            \
+            .outputs = {OUTPUT_DEFAULT},                                          \
+            .constructor = depthToSpaceConstructor};
+
+DEFINE_DEPTH_TO_SPACE_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_DEPTH_TO_SPACE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_DEPTH_TO_SPACE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+#define DEFINE_DEPTH_TO_SPACE_WITH_LAYOUT_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_layout_##ver){ \
+ .opType = TestOperationType::DEPTH_TO_SPACE, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = depthToSpaceConstructor};
+
+DEFINE_DEPTH_TO_SPACE_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_DEPTH_TO_SPACE_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void reshapeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
@@ -149,24 +150,21 @@ static void reshapeConstructor(TestOperandType, uint32_t rank, RandomOperation*
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-DEFINE_OPERATION_SIGNATURE(RESHAPE_V1_0){
- .opType = TestOperationType::RESHAPE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = reshapeConstructor};
-
-DEFINE_OPERATION_SIGNATURE(RESHAPE_V1_2){
- .opType = TestOperationType::RESHAPE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = reshapeConstructor};
+#define DEFINE_RESHAPE_SIGNATURE(ver, ...)                                     \
+    DEFINE_OPERATION_SIGNATURE(RESHAPE_##ver){                                 \
+            .opType = TestOperationType::RESHAPE,                              \
+            .supportedDataTypes = {__VA_ARGS__},                               \
+            .supportedRanks = {1, 2, 3, 4},                                    \
+            .version = TestHalVersion::ver,                                    \
+            .inputs = {INPUT_DEFAULT,                                          \
+                       PARAMETER_NONE(TestOperandType::TENSOR_INT32)},         \
+            .outputs = {OUTPUT_DEFAULT},                                       \
+            .constructor = reshapeConstructor};
+
+DEFINE_RESHAPE_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_RESHAPE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_RESHAPE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void batchToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -192,39 +190,39 @@ static void batchToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperat
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_V1_1){
- .opType = TestOperationType::BATCH_TO_SPACE_ND,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_1,
- .inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 3)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = batchToSpaceConstructor};
-
-DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_V1_2){
- .opType = TestOperationType::BATCH_TO_SPACE_ND,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 3)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = batchToSpaceConstructor};
-
-DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_layout_V1_2){
- .opType = TestOperationType::BATCH_TO_SPACE_ND,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 3),
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = batchToSpaceConstructor};
+#define DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_##ver){ \
+ .opType = TestOperationType::BATCH_TO_SPACE_ND, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, \
+ /*len=*/2, /*range=*/1, 3)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = batchToSpaceConstructor};
+
+DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_BATCH_TO_SPACE_ND_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+#define DEFINE_BATCH_TO_SPACE_ND_WITH_LAYOUT_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_layout_##ver){ \
+ .opType = TestOperationType::BATCH_TO_SPACE_ND, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, \
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, \
+ 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = batchToSpaceConstructor};
+
+DEFINE_BATCH_TO_SPACE_ND_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_BATCH_TO_SPACE_ND_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void spaceToBatchConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -268,42 +266,42 @@ static const OperandSignature paddingTensor_SPACE_TO_BATCH_ND = {
for (int i = 0; i < 4; i++) op->value<int32_t>(i) = getUniform<int32_t>(0, 10);
}};
-DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_V1_1){
- .opType = TestOperationType::SPACE_TO_BATCH_ND,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_1,
- .inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
- paddingTensor_SPACE_TO_BATCH_ND},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = spaceToBatchConstructor};
-
-DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_V1_2){
- .opType = TestOperationType::SPACE_TO_BATCH_ND,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
- paddingTensor_SPACE_TO_BATCH_ND},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = spaceToBatchConstructor};
-
-DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_layout_V1_2){
- .opType = TestOperationType::SPACE_TO_BATCH_ND,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
- paddingTensor_SPACE_TO_BATCH_ND,
- PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = spaceToBatchConstructor};
+#define DEFINE_SPACE_TO_BATCH_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_##ver){ \
+ .opType = TestOperationType::SPACE_TO_BATCH_ND, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, \
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, \
+ 5), \
+ paddingTensor_SPACE_TO_BATCH_ND}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = spaceToBatchConstructor};
+
+DEFINE_SPACE_TO_BATCH_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_SPACE_TO_BATCH_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_SPACE_TO_BATCH_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+#define DEFINE_SPACE_TO_BATCH_WITH_LAYOUT_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_layout_##ver){ \
+ .opType = TestOperationType::SPACE_TO_BATCH_ND, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, \
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, \
+ 5), \
+ paddingTensor_SPACE_TO_BATCH_ND, \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = spaceToBatchConstructor};
+
+DEFINE_SPACE_TO_BATCH_WITH_LAYOUT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_SPACE_TO_BATCH_WITH_LAYOUT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void padConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
@@ -335,41 +333,44 @@ static const OperandSignature paddingScalar_PAD_V2 = {
op->dataType = TestOperandType::INT32;
op->setScalarValue<int32_t>(getUniform<int32_t>(0, 255));
break;
+ case TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ op->dataType = TestOperandType::INT32;
+ op->setScalarValue<int32_t>(getUniform<int32_t>(-128, 127));
+ break;
default:
NN_FUZZER_CHECK(false) << "Unsupported data type for PAD_V2";
}
}};
-DEFINE_OPERATION_SIGNATURE(PAD_V1_1){
- .opType = TestOperationType::PAD,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = padConstructor};
-
-DEFINE_OPERATION_SIGNATURE(PAD_V1_2){
- .opType = TestOperationType::PAD,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = padConstructor};
-
-DEFINE_OPERATION_SIGNATURE(PAD_V2_V1_2){
- .opType = TestOperationType::PAD_V2,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
- paddingScalar_PAD_V2},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = padConstructor};
+#define DEFINE_PAD_SIGNATURE(ver, ...)                                         \
+    DEFINE_OPERATION_SIGNATURE(PAD_##ver){                                     \
+            .opType = TestOperationType::PAD,                                  \
+            .supportedDataTypes = {__VA_ARGS__},                               \
+            .supportedRanks = {1, 2, 3, 4},                                    \
+            .version = TestHalVersion::ver,                                    \
+            .inputs = {INPUT_DEFAULT,                                          \
+                       PARAMETER_NONE(TestOperandType::TENSOR_INT32)},         \
+            .outputs = {OUTPUT_DEFAULT},                                       \
+            .constructor = padConstructor};
+
+DEFINE_PAD_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_PAD_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_PAD_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+
+#define DEFINE_PAD_V2_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(PAD_V2_##ver){ \
+ .opType = TestOperationType::PAD_V2, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ paddingScalar_PAD_V2}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = padConstructor};
+
+DEFINE_PAD_V2_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_PAD_V2_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void transposeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// Create the permutation value by randomly shuffling a sequential array.
@@ -390,24 +391,20 @@ static void transposeConstructor(TestOperandType, uint32_t rank, RandomOperation
}
// TODO: Test the case when the second input is omitted.
-DEFINE_OPERATION_SIGNATURE(TRANSPOSE_V1_1){
- .opType = TestOperationType::TRANSPOSE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = transposeConstructor};
-
-DEFINE_OPERATION_SIGNATURE(TRANSPOSE_V1_2){
- .opType = TestOperationType::TRANSPOSE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = transposeConstructor};
+#define DEFINE_TRANSPOSE_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(TRANSPOSE_##ver){ \
+ .opType = TestOperationType::TRANSPOSE, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = transposeConstructor};
+
+DEFINE_TRANSPOSE_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_TRANSPOSE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_TRANSPOSE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void channelShuffleConstructor(TestOperandType dataType, uint32_t rank,
RandomOperation* op) {
@@ -420,17 +417,21 @@ static void channelShuffleConstructor(TestOperandType dataType, uint32_t rank,
(op->inputs[0]->dimensions[axis] % numGroups).setEqual(0);
}
-DEFINE_OPERATION_SIGNATURE(CHANNEL_SHUFFLE_V1_2){
- .opType = TestOperationType::CHANNEL_SHUFFLE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM,
- TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5),
- PARAMETER_NONE(TestOperandType::INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = channelShuffleConstructor};
+#define DEFINE_CHANNEL_SHUFFLE_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(CHANNEL_SHUFFLE_##ver){ \
+ .opType = TestOperationType::CHANNEL_SHUFFLE, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5), \
+ PARAMETER_NONE(TestOperandType::INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = channelShuffleConstructor};
+
+DEFINE_CHANNEL_SHUFFLE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_CHANNEL_SHUFFLE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void squeezeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// A boolean array indicating whether each dimension is selected to be squeezed.
@@ -458,24 +459,20 @@ static void squeezeConstructor(TestOperandType, uint32_t rank, RandomOperation*
}
// TODO: Test the case when the second input is omitted.
-DEFINE_OPERATION_SIGNATURE(SQUEEZE_V1_1){
- .opType = TestOperationType::SQUEEZE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = squeezeConstructor};
-
-DEFINE_OPERATION_SIGNATURE(SQUEEZE_V1_2){
- .opType = TestOperationType::SQUEEZE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = squeezeConstructor};
+#define DEFINE_SQUEEZE_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SQUEEZE_##ver){ \
+ .opType = TestOperationType::SQUEEZE, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = squeezeConstructor};
+
+DEFINE_SQUEEZE_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_SQUEEZE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_SQUEEZE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void expandDimsConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// Generate values for the "axis" tensor.
@@ -494,15 +491,19 @@ static void expandDimsConstructor(TestOperandType, uint32_t rank, RandomOperatio
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-DEFINE_OPERATION_SIGNATURE(EXPAND_DIMS_V1_2){
- .opType = TestOperationType::EXPAND_DIMS,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
- TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4, 5},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = expandDimsConstructor};
+#define DEFINE_EXPAND_DIMS_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(EXPAND_DIMS_##ver){ \
+ .opType = TestOperationType::EXPAND_DIMS, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4, 5}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = expandDimsConstructor};
+
+DEFINE_EXPAND_DIMS_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_EXPAND_DIMS_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void tileConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
@@ -517,15 +518,19 @@ static void tileConstructor(TestOperandType, uint32_t rank, RandomOperation* op)
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-DEFINE_OPERATION_SIGNATURE(TILE_V1_2){
- .opType = TestOperationType::TILE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
- TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4, 5},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = tileConstructor};
+#define DEFINE_TILE_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(TILE_##ver){ \
+ .opType = TestOperationType::TILE, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4, 5}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = tileConstructor};
+
+DEFINE_TILE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_TILE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void fillConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
op->inputs[0]->dimensions = {rank};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp b/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp
index 8cda1c7fd..7d4d5f588 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp
@@ -74,6 +74,8 @@ static void resizeOpConstructor(TestOperandType, uint32_t rank, RandomOperation*
DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_0, TestOperandType::TENSOR_FLOAT32);
DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_2, TestOperandType::TENSOR_QUANT8_ASYMM,
TestOperandType::TENSOR_FLOAT16);
+DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_3,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
#define DEFINE_RESIZE_OP_SIGNATURE(op, ver, ...) \
DEFINE_OPERATION_SIGNATURE(op##_shape_##ver){ \
@@ -100,6 +102,9 @@ DEFINE_RESIZE_OP_SIGNATURE(RESIZE_BILINEAR, V1_2, TestOperandType::TENSOR_FLOAT3
TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_FLOAT16);
DEFINE_RESIZE_OP_SIGNATURE(RESIZE_NEAREST_NEIGHBOR, V1_2, TestOperandType::TENSOR_FLOAT32,
TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_FLOAT16);
+DEFINE_RESIZE_OP_SIGNATURE(RESIZE_BILINEAR, V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
+DEFINE_RESIZE_OP_SIGNATURE(RESIZE_NEAREST_NEIGHBOR, V1_3,
+ TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
index 93b7fc2d4..185644092 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
@@ -44,16 +44,22 @@ static void embeddingLookupFinalizer(RandomOperation* op) {
}
}
-DEFINE_OPERATION_SIGNATURE(EMBEDDING_LOOKUP_V1_0){
- .opType = TestOperationType::EMBEDDING_LOOKUP,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_INT32,
- TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {2, 3, 4},
- .version = TestHalVersion::V1_0,
- .inputs = {PARAMETER_NONE(TestOperandType::TENSOR_INT32), INPUT_DEFAULT},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = embeddingLookupConstructor,
- .finalizer = embeddingLookupFinalizer};
+#define DEFINE_EMBEDDING_LOOKUP_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(EMBEDDING_LOOKUP_##ver){ \
+ .opType = TestOperationType::EMBEDDING_LOOKUP, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {PARAMETER_NONE(TestOperandType::TENSOR_INT32), INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = embeddingLookupConstructor, \
+ .finalizer = embeddingLookupFinalizer};
+
+DEFINE_EMBEDDING_LOOKUP_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32);
+DEFINE_EMBEDDING_LOOKUP_SIGNATURE(V1_2, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_EMBEDDING_LOOKUP_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+ TestOperandType::TENSOR_FLOAT16);
static void hashtableLookupConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
op->inputs[0]->dimensions = {RandomVariableType::FREE};
@@ -140,17 +146,21 @@ static void gatherFinalizer(RandomOperation* op) {
}
}
-DEFINE_OPERATION_SIGNATURE(GATHER_V1_2){
- .opType = TestOperationType::GATHER,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
- TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4, 5},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32),
- PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = gatherConstructor,
- .finalizer = gatherFinalizer};
+#define DEFINE_GATHER_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(GATHER_##ver){ \
+ .opType = TestOperationType::GATHER, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4, 5}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32), \
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = gatherConstructor, \
+ .finalizer = gatherFinalizer};
+
+DEFINE_GATHER_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_GATHER_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void selectConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
@@ -161,15 +171,19 @@ static void selectConstructor(TestOperandType, uint32_t rank, RandomOperation* o
setSameQuantization(op->outputs[0], op->inputs[1]);
}
-DEFINE_OPERATION_SIGNATURE(SELECT_V1_2){
- .opType = TestOperationType::SELECT,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
- TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_TYPED(TestOperandType::TENSOR_BOOL8), INPUT_DEFAULT, INPUT_DEFAULT},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = selectConstructor};
+#define DEFINE_SELECT_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SELECT_##ver){ \
+ .opType = TestOperationType::SELECT, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_TYPED(TestOperandType::TENSOR_BOOL8), INPUT_DEFAULT, INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = selectConstructor};
+
+DEFINE_SELECT_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_SELECT_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void topKConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
@@ -194,15 +208,19 @@ static void topKConstructor(TestOperandType, uint32_t rank, RandomOperation* op)
op->outputs[1]->doNotConnect = true;
}
-DEFINE_OPERATION_SIGNATURE(TOPK_V2_V1_2){
- .opType = TestOperationType::TOPK_V2,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
- TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, RANDOM_INT_FREE},
- .outputs = {OUTPUT_DEFAULT, OUTPUT_TYPED(TestOperandType::TENSOR_INT32)},
- .constructor = topKConstructor};
+#define DEFINE_TOPK_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(TOPK_V2_##ver){ \
+ .opType = TestOperationType::TOPK_V2, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, RANDOM_INT_FREE}, \
+ .outputs = {OUTPUT_DEFAULT, OUTPUT_TYPED(TestOperandType::TENSOR_INT32)}, \
+ .constructor = topKConstructor};
+
+DEFINE_TOPK_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_TOPK_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
static void sliceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
op->inputs[1]->dimensions = {rank};
@@ -229,17 +247,21 @@ static void sliceFinalizer(RandomOperation* op) {
}
}
-DEFINE_OPERATION_SIGNATURE(SLICE_V1_2){
- .opType = TestOperationType::SLICE,
- .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
- TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
- .supportedRanks = {1, 2, 3, 4},
- .version = TestHalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
- PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = sliceConstructor,
- .finalizer = sliceFinalizer};
+// Convenience macro: defines a SLICE operation signature for HAL version
+// `ver` over the listed operand data types. Inputs 1 and 2 are the
+// TENSOR_INT32 begin/size parameter tensors, filled in by
+// sliceConstructor/sliceFinalizer.
+#define DEFINE_SLICE_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(SLICE_##ver){ \
+ .opType = TestOperationType::SLICE, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = TestHalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = sliceConstructor, \
+ .finalizer = sliceFinalizer};
+
+// SLICE exists since HAL 1.2; HAL 1.3 adds the signed quantized type.
+DEFINE_SLICE_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_SLICE_SIGNATURE(V1_3, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED);
inline int32_t convertToBitMask(const std::vector<bool>& flags) {
int32_t mask = 0, bit = 1;
@@ -350,6 +372,20 @@ DEFINE_OPERATION_SIGNATURE(STRIDED_SLICE_V1_2){
.constructor = stridedSliceConstructor,
.finalizer = stridedSliceFinalizer};
+// STRIDED_SLICE support for TENSOR_QUANT8_ASYMM_SIGNED, new in HAL 1.3.
+// Inputs 1-3 are the TENSOR_INT32 begin/end/strides tensors; inputs 4-6
+// are three INT32 scalar parameters (presumably begin/end/shrink-axis
+// masks per the STRIDED_SLICE spec — values are produced by
+// stridedSliceConstructor/stridedSliceFinalizer).
+// NOTE(review): written out directly rather than via a version macro like
+// the SELECT/TOPK_V2/SLICE signatures; consider factoring if V1_2 is
+// refactored too.
+DEFINE_OPERATION_SIGNATURE(STRIDED_SLICE_V1_3){
+ .opType = TestOperationType::STRIDED_SLICE,
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = TestHalVersion::V1_3,
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::INT32), PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_NONE(TestOperandType::INT32)},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = stridedSliceConstructor,
+ .finalizer = stridedSliceFinalizer};
+
+
} // namespace fuzzing_test
} // namespace nn
} // namespace android