summaryrefslogtreecommitdiff
path: root/nn/runtime/test/fuzzing
diff options
context:
space:
mode:
authorXusong Wang <xusongw@google.com>2020-03-05 16:31:05 -0800
committerXusong Wang <xusongw@google.com>2020-03-09 16:09:28 -0700
commitcc9cfcda51c8ee428571e195d7400f45145e37eb (patch)
tree65eec5858b7d12c4f01ecf913fb7b6417ef82e1d /nn/runtime/test/fuzzing
parent2e0c30812f8dcef12f14e70a0504a60f1914ceda (diff)
downloadml-cc9cfcda51c8ee428571e195d7400f45145e37eb.tar.gz
Use TestOperandType instead of Type in RGG.
Bug: 150805665 Test: NNT_static_fuzzing Change-Id: Ib656ca9ecb5918b29a15e9e509407c07fa3af109
Diffstat (limited to 'nn/runtime/test/fuzzing')
-rw-r--r--nn/runtime/test/fuzzing/OperationManager.cpp4
-rw-r--r--nn/runtime/test/fuzzing/OperationManager.h13
-rw-r--r--nn/runtime/test/fuzzing/RandomGraphGenerator.cpp6
-rw-r--r--nn/runtime/test/fuzzing/RandomGraphGenerator.h5
-rw-r--r--nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h24
-rw-r--r--nn/runtime/test/fuzzing/TestRandomGraph.cpp24
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp47
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp110
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp48
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp310
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp134
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp13
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp91
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h202
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp187
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp77
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp193
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Resize.cpp60
-rw-r--r--nn/runtime/test/fuzzing/operation_signatures/Selection.cpp86
19 files changed, 872 insertions, 762 deletions
diff --git a/nn/runtime/test/fuzzing/OperationManager.cpp b/nn/runtime/test/fuzzing/OperationManager.cpp
index c215efd2c..55f2c34c7 100644
--- a/nn/runtime/test/fuzzing/OperationManager.cpp
+++ b/nn/runtime/test/fuzzing/OperationManager.cpp
@@ -25,6 +25,8 @@ namespace android {
namespace nn {
namespace fuzzing_test {
+using namespace test_helper;
+
template <typename T>
inline bool hasValue(const std::vector<T>& vec, const T& val) {
// Empty vector indicates "no filter", i.e. always true.
@@ -37,7 +39,7 @@ bool OperationSignature::matchFilter(const OperationFilter& filter) {
}
// Match data types.
- std::vector<Type> combinedDataTypes;
+ std::vector<TestOperandType> combinedDataTypes;
for (auto dataType : supportedDataTypes) {
if (hasValue(filter.dataTypes, dataType)) combinedDataTypes.push_back(dataType);
}
diff --git a/nn/runtime/test/fuzzing/OperationManager.h b/nn/runtime/test/fuzzing/OperationManager.h
index 37870014c..0f1670f90 100644
--- a/nn/runtime/test/fuzzing/OperationManager.h
+++ b/nn/runtime/test/fuzzing/OperationManager.h
@@ -22,6 +22,7 @@
#include <string>
#include <vector>
+#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "fuzzing/RandomGraphGenerator.h"
#include "fuzzing/RandomVariable.h"
@@ -30,8 +31,6 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-using test_wrapper::Type;
-
struct OperandSignature {
// Possible values are [INPUT | CONST | OUTPUT].
// If CONST, the generator will avoid feeding the operand with another operation’s output.
@@ -39,7 +38,8 @@ struct OperandSignature {
// The operand constructor is invoked before the operation constructor. This is for
// setting the data type, quantization parameters, or optionally the scalar value.
- std::function<void(Type, uint32_t, RandomOperand*)> constructor = nullptr;
+ std::function<void(test_helper::TestOperandType, uint32_t, RandomOperand*)> constructor =
+ nullptr;
// The operand finalizer is invoked after the graph structure is frozen but before the operation
// finalizer. This is for generating the buffer values for the operand.
@@ -61,7 +61,7 @@ enum class HalVersion : int32_t { V1_0 = 0, V1_1 = 1, V1_2 = 2 };
//
struct OperationFilter {
std::vector<ANeuralNetworksOperationType> opcodes;
- std::vector<Type> dataTypes;
+ std::vector<test_helper::TestOperandType> dataTypes;
std::vector<uint32_t> ranks;
std::vector<HalVersion> versions;
};
@@ -71,7 +71,7 @@ struct OperationSignature {
// Upon generation, the random graph generator will randomly choose a supported data type and
// rank, and pass the information to the constructors.
ANeuralNetworksOperationType opType;
- std::vector<Type> supportedDataTypes;
+ std::vector<test_helper::TestOperandType> supportedDataTypes;
std::vector<uint32_t> supportedRanks;
HalVersion version;
@@ -83,7 +83,8 @@ struct OperationSignature {
// setting the dimension relationship of random operands, and/or generating parameter values at
// the operation level, e.g. a parameter depends on or affects another operand in the same
// operation.
- std::function<void(Type, uint32_t, RandomOperation*)> constructor = nullptr;
+ std::function<void(test_helper::TestOperandType, uint32_t, RandomOperation*)> constructor =
+ nullptr;
// The operation finalizer is invoked after the graph structure is frozen and inputs and outputs
// constructors are invoked. This is for generating operand buffers at the operation level, e.g.
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 245e0e99b..6d46a951a 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -38,11 +38,11 @@ namespace nn {
namespace fuzzing_test {
using test_wrapper::Result;
-using test_wrapper::Type;
using namespace test_helper;
// Construct a RandomOperand from OperandSignature.
-RandomOperand::RandomOperand(const OperandSignature& operand, Type dataType, uint32_t rank)
+RandomOperand::RandomOperand(const OperandSignature& operand, TestOperandType dataType,
+ uint32_t rank)
: type(operand.type), finalizer(operand.finalizer) {
NN_FUZZER_LOG << "Operand: " << toString(type);
if (operand.constructor) operand.constructor(dataType, rank, this);
@@ -79,7 +79,7 @@ RandomOperation::RandomOperation(const OperationSignature& operation)
NN_FUZZER_LOG << "Operation: " << kOperationNames[static_cast<int32_t>(opType)];
// Determine the data type and rank of the operation and invoke the constructor.
- Type dataType = getRandomChoice(operation.supportedDataTypes);
+ TestOperandType dataType = getRandomChoice(operation.supportedDataTypes);
uint32_t rank = getRandomChoice(operation.supportedRanks);
// Initialize operands and operation.
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.h b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
index b1391dad6..5ffe2cd68 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
@@ -29,7 +29,6 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-using test_wrapper::Type;
using OperandBuffer = std::vector<int32_t>;
struct OperandSignature;
@@ -40,7 +39,7 @@ enum class RandomOperandType { INPUT = 0, OUTPUT = 1, INTERNAL = 2, CONST = 3 };
struct RandomOperand {
RandomOperandType type;
- Type dataType;
+ test_helper::TestOperandType dataType;
float scale = 0.0f;
int32_t zeroPoint = 0;
std::vector<RandomVariable> dimensions;
@@ -64,7 +63,7 @@ struct RandomOperand {
// eventually end up being a model output.
bool doNotConnect = false;
- RandomOperand(const OperandSignature& op, Type dataType, uint32_t rank);
+ RandomOperand(const OperandSignature& op, test_helper::TestOperandType dataType, uint32_t rank);
// Resize the underlying operand buffer.
template <typename T>
diff --git a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index ddf1a84d8..ce2412c39 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -28,6 +28,7 @@
#include "RandomGraphGenerator.h"
#include "RandomVariable.h"
+#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
namespace android {
@@ -238,24 +239,6 @@ static const char* kOperationNames[] = {
"RESIZE_NEAREST_NEIGHBOR",
};
-static const char* kTypeNames[] = {
- "FLOAT32",
- "INT32",
- "UINT32",
- "TENSOR_FLOAT32",
- "TENSOR_INT32",
- "TENSOR_QUANT8_ASYMM",
- "BOOL",
- "TENSOR_QUANT16_SYMM",
- "TENSOR_FLOAT16",
- "TENSOR_BOOL8",
- "FLOAT16",
- "TENSOR_QUANT8_SYMM_PER_CHANNEL",
- "TENSOR_QUANT16_ASYMM",
- "TENSOR_QUANT8_SYMM",
- "TENSOR_QUANT8_ASYMM_SIGNED",
-};
-
static const char* kLifeTimeNames[6] = {
"TEMPORARY_VARIABLE", "SUBGRAPH_INPUT", "SUBGRAPH_OUTPUT",
"CONSTANT_COPY", "CONSTANT_REFERENCE", "NO_VALUE",
@@ -344,11 +327,6 @@ inline std::string toString<RandomVariableNode>(const RandomVariableNode& var) {
}
template <>
-inline std::string toString<Type>(const Type& type) {
- return kTypeNames[static_cast<int32_t>(type)];
-}
-
-template <>
inline std::string toString<RandomVariable>(const RandomVariable& var) {
return "var" + std::to_string(var.get()->index);
}
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index ffc586474..3b0c27b3c 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -776,18 +776,18 @@ const AccuracyCriteria kLargeGraphCriteria = {.float32 =
// * 5-op graph with dimensions in range [1, 1000].
// * 40-op graph with dimensions in range [1, 10].
//
-#define TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(dataType, rank) \
- TEST_P(RandomGraphTest, SmallGraph_##dataType##_Rank##rank) { \
- OperationFilter filter = {.dataTypes = {Type::dataType}, .ranks = {rank}}; \
- OperationManager::get()->applyFilter(filter); \
- mCriteria = kSmallGraphCriteria; \
- testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE); \
- } \
- TEST_P(RandomGraphTest, LargeGraph_##dataType##_Rank##rank) { \
- OperationFilter filter = {.dataTypes = {Type::dataType}, .ranks = {rank}}; \
- OperationManager::get()->applyFilter(filter); \
- mCriteria = kLargeGraphCriteria; \
- testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW); \
+#define TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(dataType, rank) \
+ TEST_P(RandomGraphTest, SmallGraph_##dataType##_Rank##rank) { \
+ OperationFilter filter = {.dataTypes = {TestOperandType::dataType}, .ranks = {rank}}; \
+ OperationManager::get()->applyFilter(filter); \
+ mCriteria = kSmallGraphCriteria; \
+ testRandomGraph(GraphSize::SMALL, DimensionRange::WIDE); \
+ } \
+ TEST_P(RandomGraphTest, LargeGraph_##dataType##_Rank##rank) { \
+ OperationFilter filter = {.dataTypes = {TestOperandType::dataType}, .ranks = {rank}}; \
+ OperationManager::get()->applyFilter(filter); \
+ mCriteria = kLargeGraphCriteria; \
+ testRandomGraph(GraphSize::LARGE, DimensionRange::NARROW); \
}
// Random graph test with TENSOR_QUANT8_ASYMM as the primary data type is currently not defined.
diff --git a/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp b/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
index 101e2f707..f8f52c714 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
@@ -14,16 +14,19 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <vector>
+
#include "fuzzing/operation_signatures/OperationSignatureUtils.h"
namespace android {
namespace nn {
namespace fuzzing_test {
-static void roiTensorConstructor(Type dataType, uint32_t, RandomOperand* op) {
+static void roiTensorConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) {
op->dataType = dataType;
- if (dataType == Type::TENSOR_QUANT8_ASYMM) {
- op->dataType = Type::TENSOR_QUANT16_ASYMM;
+ if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
+ op->dataType = TestOperandType::TENSOR_QUANT16_ASYMM;
op->scale = 0.125f;
op->zeroPoint = 0;
}
@@ -35,7 +38,7 @@ static const OperandSignature kInputRoiTensor = {.type = RandomOperandType::CONS
static const OperandSignature kOutputRoiTensor = {.type = RandomOperandType::OUTPUT,
.constructor = roiTensorConstructor};
-static void roiConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void roiConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
bool useNchw;
if (op->opType == ANEURALNETWORKS_ROI_ALIGN) {
@@ -89,12 +92,12 @@ static void roiFinalizer(RandomOperation* op) {
uint32_t numRois = op->inputs[1]->dimensions[0].getValue();
// Fill values to the roi tensor with format [x1, y1, x2, y2].
switch (op->inputs[1]->dataType) {
- case Type::TENSOR_FLOAT32: {
+ case TestOperandType::TENSOR_FLOAT32: {
float maxH = static_cast<float>(height) * op->inputs[5]->value<float>();
float maxW = static_cast<float>(width) * op->inputs[6]->value<float>();
fillRoiTensor<float>(numRois, maxH, maxW, op->inputs[1].get());
} break;
- case Type::TENSOR_QUANT16_ASYMM: {
+ case TestOperandType::TENSOR_QUANT16_ASYMM: {
uint16_t maxH = static_cast<float>(height) * op->inputs[5]->value<float>();
uint16_t maxW = static_cast<float>(width) * op->inputs[6]->value<float>();
fillRoiTensor<uint16_t>(numRois, maxH, maxW, op->inputs[1].get());
@@ -112,28 +115,29 @@ static void roiFinalizer(RandomOperation* op) {
for (uint32_t i = 0; i < numRois; i++) op->inputs[2]->value<int32_t>(i) = batchIndex[i];
}
-// Type::TENSOR_FLOAT16 is intentionally excluded for all bounding box ops because
+// TestOperandType::TENSOR_FLOAT16 is intentionally excluded for all bounding box ops because
// 1. It has limited precision for computation on bounding box indices, which will lead to poor
// accuracy evaluation.
// 2. There is no actual graph that uses this data type on bounding boxes.
DEFINE_OPERATION_SIGNATURE(ROI_ALIGN_V1_2){
.opType = ANEURALNETWORKS_ROI_ALIGN,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
{
INPUT_DEFAULT,
kInputRoiTensor,
- PARAMETER_NONE(Type::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
RANDOM_INT_FREE,
RANDOM_INT_FREE,
PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
- PARAMETER_RANGE(Type::INT32, 0, 10),
- PARAMETER_RANGE(Type::INT32, 0, 10),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 0, 10),
+ PARAMETER_RANGE(TestOperandType::INT32, 0, 10),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = roiConstructor,
@@ -141,25 +145,26 @@ DEFINE_OPERATION_SIGNATURE(ROI_ALIGN_V1_2){
DEFINE_OPERATION_SIGNATURE(ROI_POOLING_V1_2){
.opType = ANEURALNETWORKS_ROI_POOLING,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
{
INPUT_DEFAULT,
kInputRoiTensor,
- PARAMETER_NONE(Type::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
RANDOM_INT_FREE,
RANDOM_INT_FREE,
PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
PARAMETER_FLOAT_RANGE(0.1f, 10.0f),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = roiConstructor,
.finalizer = roiFinalizer};
-static void heatmapMaxKeypointConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void heatmapMaxKeypointConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
bool useNchw = op->inputs[2]->value<bool8>();
RandomVariable heatmapSize = RandomVariableType::FREE;
@@ -188,11 +193,11 @@ static void heatmapMaxKeypointFinalizer(RandomOperation* op) {
uint32_t heatmapSize = op->inputs[0]->dimensions[2].getValue();
// Fill values to the roi tensor with format [x1, y1, x2, y2].
switch (op->inputs[1]->dataType) {
- case Type::TENSOR_FLOAT32: {
+ case TestOperandType::TENSOR_FLOAT32: {
float maxSize = heatmapSize;
fillRoiTensor<float>(numRois, maxSize, maxSize, op->inputs[1].get());
} break;
- case Type::TENSOR_QUANT16_ASYMM: {
+ case TestOperandType::TENSOR_QUANT16_ASYMM: {
uint16_t maxSize = static_cast<uint16_t>(heatmapSize * 8);
fillRoiTensor<uint16_t>(numRois, maxSize, maxSize, op->inputs[1].get());
} break;
@@ -203,10 +208,12 @@ static void heatmapMaxKeypointFinalizer(RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(HEATMAP_MAX_KEYPOINT_V1_2){
.opType = ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, kInputRoiTensor, PARAMETER_CHOICE(Type::BOOL, true, false)},
+ .inputs = {INPUT_DEFAULT, kInputRoiTensor,
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
.outputs = {OUTPUT_DEFAULT, kOutputRoiTensor},
.constructor = heatmapMaxKeypointConstructor,
.finalizer = heatmapMaxKeypointFinalizer};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
index e98ba465c..dd2fd9acb 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
@@ -14,13 +14,15 @@
* limitations under the License.
*/
+#include <algorithm>
+
#include "fuzzing/operation_signatures/OperationSignatureUtils.h"
namespace android {
namespace nn {
namespace fuzzing_test {
-static void broadcastOpConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
+static void broadcastOpConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
// TODO: All inputs of the broadcast op have the same rank 4 for now.
op->inputs[0]->dimensions.resize(rank);
op->inputs[1]->dimensions.resize(rank);
@@ -41,7 +43,7 @@ static void broadcastOpConstructor(Type dataType, uint32_t rank, RandomOperation
}
// MUL requires output.scale > input0.scale * input1.scale.
- if (dataType == Type::TENSOR_QUANT8_ASYMM && op->opType == ANEURALNETWORKS_MUL) {
+ if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM && op->opType == ANEURALNETWORKS_MUL) {
float minScale = op->inputs[0]->scale * op->inputs[1]->scale;
op->outputs[0]->scale = getUniform(minScale, minScale * 5);
}
@@ -54,25 +56,29 @@ static void broadcastOpConstructor(Type dataType, uint32_t rank, RandomOperation
}
// For broadcast operations with fused activation.
-#define DEFINE_BROADCAST_WITH_ACT_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3)}, \
- .outputs = {OUTPUT_DEFAULT}, \
+#define DEFINE_BROADCAST_WITH_ACT_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
.constructor = broadcastOpConstructor};
// Arithmetic with activation.
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(ADD, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(MUL, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(SUB, V1_1, Type::TENSOR_FLOAT32);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_1, Type::TENSOR_FLOAT32);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(ADD, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(MUL, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(SUB, V1_2, Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_2, Type::TENSOR_FLOAT16);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(ADD, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(MUL, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(SUB, V1_1, TestOperandType::TENSOR_FLOAT32);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_1, TestOperandType::TENSOR_FLOAT32);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(ADD, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(MUL, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(SUB, V1_2, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_2, TestOperandType::TENSOR_FLOAT16);
// For broadcast ops with output of the same data type as inputs.
#define DEFINE_BROADCAST_SIGNATURE(op, ver, ...) \
@@ -85,40 +91,50 @@ DEFINE_BROADCAST_WITH_ACT_SIGNATURE(DIV, V1_2, Type::TENSOR_FLOAT16);
.constructor = broadcastOpConstructor};
// Arithmetic without activation.
-DEFINE_BROADCAST_SIGNATURE(POW, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_BROADCAST_SIGNATURE(PRELU, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM);
-DEFINE_BROADCAST_SIGNATURE(MAXIMUM, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_INT32);
-DEFINE_BROADCAST_SIGNATURE(MINIMUM, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_INT32);
+DEFINE_BROADCAST_SIGNATURE(POW, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_BROADCAST_SIGNATURE(PRELU, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_BROADCAST_SIGNATURE(MAXIMUM, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_INT32);
+DEFINE_BROADCAST_SIGNATURE(MINIMUM, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_INT32);
// Logical
-DEFINE_BROADCAST_SIGNATURE(LOGICAL_AND, V1_2, Type::TENSOR_BOOL8);
-DEFINE_BROADCAST_SIGNATURE(LOGICAL_OR, V1_2, Type::TENSOR_BOOL8);
+DEFINE_BROADCAST_SIGNATURE(LOGICAL_AND, V1_2, TestOperandType::TENSOR_BOOL8);
+DEFINE_BROADCAST_SIGNATURE(LOGICAL_OR, V1_2, TestOperandType::TENSOR_BOOL8);
// Comparisons
-#define DEFINE_COMPARISON_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, INPUT_DEFAULT}, \
- .outputs = {OUTPUT_TYPED(Type::TENSOR_BOOL8)}, \
- .constructor = broadcastOpConstructor};
+#define DEFINE_COMPARISON_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::TENSOR_BOOL8)}, \
+ .constructor = broadcastOpConstructor};
-DEFINE_COMPARISON_SIGNATURE(EQUAL, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_BOOL8);
-DEFINE_COMPARISON_SIGNATURE(GREATER, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_COMPARISON_SIGNATURE(GREATER_EQUAL, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_COMPARISON_SIGNATURE(LESS, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_COMPARISON_SIGNATURE(LESS_EQUAL, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_COMPARISON_SIGNATURE(NOT_EQUAL, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_BOOL8);
+DEFINE_COMPARISON_SIGNATURE(EQUAL, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_BOOL8);
+DEFINE_COMPARISON_SIGNATURE(GREATER, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_COMPARISON_SIGNATURE(GREATER_EQUAL, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_COMPARISON_SIGNATURE(LESS, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_COMPARISON_SIGNATURE(LESS_EQUAL, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_COMPARISON_SIGNATURE(NOT_EQUAL, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_BOOL8);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp b/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
index 7e9c5c953..b7d3ce9fd 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
@@ -47,45 +47,51 @@ static void concatConstructor(uint32_t numInputs, bool isV1_0, uint32_t rank, Ra
DEFINE_OPERATION_SIGNATURE(CONCAT_2_V1_0){
.opType = ANEURALNETWORKS_CONCATENATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
- .constructor = [](Type, uint32_t rank, RandomOperation* op) {
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
concatConstructor(/*numInputs=*/2, /*isV1_0=*/true, rank, op);
}};
DEFINE_OPERATION_SIGNATURE(CONCAT_3_V1_0){
.opType = ANEURALNETWORKS_CONCATENATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_DEFAULT,
+ PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
- .constructor = [](Type, uint32_t rank, RandomOperation* op) {
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
concatConstructor(/*numInputs=*/3, /*isV1_0=*/true, rank, op);
}};
DEFINE_OPERATION_SIGNATURE(CONCAT_2_V1_2){
.opType = ANEURALNETWORKS_CONCATENATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
- .constructor = [](Type, uint32_t rank, RandomOperation* op) {
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
concatConstructor(/*numInputs=*/2, /*isV1_0=*/false, rank, op);
}};
DEFINE_OPERATION_SIGNATURE(CONCAT_3_V1_2){
.opType = ANEURALNETWORKS_CONCATENATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_DEFAULT,
+ PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
- .constructor = [](Type, uint32_t rank, RandomOperation* op) {
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
concatConstructor(/*numInputs=*/3, /*isV1_0=*/false, rank, op);
}};
@@ -115,25 +121,27 @@ static void splitConstructor(uint32_t numSplits, uint32_t rank, RandomOperation*
DEFINE_OPERATION_SIGNATURE(SPLIT_2_V1_2){
.opType = ANEURALNETWORKS_SPLIT,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32), PARAMETER_CHOICE(Type::INT32, 2)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_CHOICE(TestOperandType::INT32, 2)},
.outputs = {OUTPUT_DEFAULT, OUTPUT_DEFAULT},
- .constructor = [](Type, uint32_t rank, RandomOperation* op) {
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
splitConstructor(/*numSplits=*/2, rank, op);
}};
DEFINE_OPERATION_SIGNATURE(SPLIT_3_V1_2){
.opType = ANEURALNETWORKS_SPLIT,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32), PARAMETER_CHOICE(Type::INT32, 3)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_CHOICE(TestOperandType::INT32, 3)},
.outputs = {OUTPUT_DEFAULT, OUTPUT_DEFAULT, OUTPUT_DEFAULT},
- .constructor = [](Type, uint32_t rank, RandomOperation* op) {
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperation* op) {
splitConstructor(/*numSplits=*/3, rank, op);
}};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp b/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp
index 7a502897a..7b2286b17 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Convolutions.cpp
@@ -22,7 +22,8 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-static void conv2DExplicitConstructor(Type, uint32_t rank, HalVersion ver, RandomOperation* op) {
+static void conv2DExplicitConstructor(TestOperandType, uint32_t rank, HalVersion ver,
+ RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -76,7 +77,8 @@ static void conv2DExplicitConstructor(Type, uint32_t rank, HalVersion ver, Rando
setConvFCScale(/*applyOutputScaleBound=*/ver == HalVersion::V1_0, op);
}
-static void conv2DImplicitConstructor(Type, uint32_t rank, HalVersion ver, RandomOperation* op) {
+static void conv2DImplicitConstructor(TestOperandType, uint32_t rank, HalVersion ver,
+ RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -136,13 +138,13 @@ static void conv2DImplicitConstructor(Type, uint32_t rank, HalVersion ver, Rando
INPUT_DEFAULT, \
INPUT_DEFAULT, \
INPUT_BIAS, \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
}, \
.outputs = {OUTPUT_DEFAULT}, \
.constructor = std::bind(conv2DExplicitConstructor, _1, _2, HalVersion::ver, _3)}; \
@@ -156,21 +158,24 @@ static void conv2DImplicitConstructor(Type, uint32_t rank, HalVersion ver, Rando
INPUT_DEFAULT, \
INPUT_DEFAULT, \
INPUT_BIAS, \
- PARAMETER_CHOICE(Type::INT32, 1, 2), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
}, \
.outputs = {OUTPUT_DEFAULT}, \
.constructor = std::bind(conv2DImplicitConstructor, _1, _2, HalVersion::ver, _3)};
-DEFINE_CONV_2D_SIGNATURE(V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_CONV_2D_SIGNATURE(V1_2, Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM);
+DEFINE_CONV_2D_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_CONV_2D_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_layout_V1_2){
.opType = ANEURALNETWORKS_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -178,22 +183,23 @@ DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_layout_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(conv2DExplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_layout_V1_2){
.opType = ANEURALNETWORKS_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -201,19 +207,20 @@ DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_layout_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 1, 2),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(conv2DImplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_dilation_V1_2){
.opType = ANEURALNETWORKS_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -221,24 +228,25 @@ DEFINE_OPERATION_SIGNATURE(CONV_2D_explicit_dilation_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(conv2DExplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_dilation_V1_2){
.opType = ANEURALNETWORKS_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -246,18 +254,18 @@ DEFINE_OPERATION_SIGNATURE(CONV_2D_implicit_dilation_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 1, 2),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(conv2DImplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
-static void depthwiseConv2DExplicitConstructor(Type, uint32_t rank, HalVersion ver,
+static void depthwiseConv2DExplicitConstructor(TestOperandType, uint32_t rank, HalVersion ver,
RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -313,7 +321,7 @@ static void depthwiseConv2DExplicitConstructor(Type, uint32_t rank, HalVersion v
setConvFCScale(/*applyOutputScaleBound=*/ver == HalVersion::V1_0, op);
}
-static void depthwiseConv2DImplicitConstructor(Type, uint32_t rank, HalVersion ver,
+static void depthwiseConv2DImplicitConstructor(TestOperandType, uint32_t rank, HalVersion ver,
RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
@@ -376,14 +384,14 @@ static void depthwiseConv2DImplicitConstructor(Type, uint32_t rank, HalVersion v
INPUT_DEFAULT, \
INPUT_DEFAULT, \
INPUT_BIAS, \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
RANDOM_INT_RANGE(1, 5), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
}, \
.outputs = {OUTPUT_DEFAULT}, \
.constructor = \
@@ -398,23 +406,26 @@ static void depthwiseConv2DImplicitConstructor(Type, uint32_t rank, HalVersion v
INPUT_DEFAULT, \
INPUT_DEFAULT, \
INPUT_BIAS, \
- PARAMETER_CHOICE(Type::INT32, 1, 2), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
RANDOM_INT_RANGE(1, 5), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
}, \
.outputs = {OUTPUT_DEFAULT}, \
.constructor = \
std::bind(depthwiseConv2DImplicitConstructor, _1, _2, HalVersion::ver, _3)};
-DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_2, Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM);
+DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_DEPTHWISE_CONV_2D_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_layout_V1_2){
.opType = ANEURALNETWORKS_DEPTHWISE_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -422,23 +433,24 @@ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_layout_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(depthwiseConv2DExplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_layout_V1_2){
.opType = ANEURALNETWORKS_DEPTHWISE_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -446,20 +458,21 @@ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_layout_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 1, 2),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(depthwiseConv2DImplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_dilation_V1_2){
.opType = ANEURALNETWORKS_DEPTHWISE_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -467,25 +480,26 @@ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_explicit_dilation_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(depthwiseConv2DExplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_dilation_V1_2){
.opType = ANEURALNETWORKS_DEPTHWISE_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -493,19 +507,19 @@ DEFINE_OPERATION_SIGNATURE(DEPTHWISE_CONV_2D_implicit_dilation_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 1, 2),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(depthwiseConv2DImplicitConstructor, _1, _2, HalVersion::V1_2, _3)};
-static void groupedConv2DExplicitConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void groupedConv2DExplicitConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -560,7 +574,7 @@ static void groupedConv2DExplicitConstructor(Type, uint32_t rank, RandomOperatio
setConvFCScale(/*applyOutputScaleBound=*/false, op);
}
-static void groupedConv2DImplicitConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void groupedConv2DImplicitConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -612,8 +626,9 @@ static void groupedConv2DImplicitConstructor(Type, uint32_t rank, RandomOperatio
DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_explicit_V1_2){
.opType = ANEURALNETWORKS_GROUPED_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -621,23 +636,24 @@ DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_explicit_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = groupedConv2DExplicitConstructor};
DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_implicit_V1_2){
.opType = ANEURALNETWORKS_GROUPED_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -645,17 +661,18 @@ DEFINE_OPERATION_SIGNATURE(GROUPED_CONV_2D_implicit_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 1, 2),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
RANDOM_INT_RANGE(1, 5),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = groupedConv2DImplicitConstructor};
-static void transposeConv2DExplicitConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void transposeConv2DExplicitConstructor(TestOperandType, uint32_t rank,
+ RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -701,7 +718,8 @@ static void transposeConv2DExplicitConstructor(Type, uint32_t rank, RandomOperat
setConvFCScale(/*applyOutputScaleBound=*/false, op);
}
-static void transposeConv2DImplicitConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void transposeConv2DImplicitConstructor(TestOperandType, uint32_t rank,
+ RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -744,8 +762,9 @@ static void transposeConv2DImplicitConstructor(Type, uint32_t rank, RandomOperat
DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_explicit_V1_2){
.opType = ANEURALNETWORKS_TRANSPOSE_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -753,22 +772,23 @@ DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_explicit_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = transposeConv2DExplicitConstructor};
DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_implicit_V1_2){
.opType = ANEURALNETWORKS_TRANSPOSE_CONV_2D,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -776,12 +796,12 @@ DEFINE_OPERATION_SIGNATURE(TRANSPOSE_CONV_2D_implicit_V1_2){
INPUT_DEFAULT,
INPUT_DEFAULT,
INPUT_BIAS,
- PARAMETER_NONE(Type::TENSOR_INT32),
- PARAMETER_CHOICE(Type::INT32, 1, 2),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = transposeConv2DImplicitConstructor};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
index dc825bf81..097a0f1ea 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
@@ -29,16 +29,19 @@ namespace fuzzing_test {
.outputs = {OUTPUT_DEFAULT}, \
.constructor = sameShapeOpConstructor};
-DEFINE_ELEMENTWISE_SIGNATURE(FLOOR, V1_0, Type::TENSOR_FLOAT32);
-DEFINE_ELEMENTWISE_SIGNATURE(RELU, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_ELEMENTWISE_SIGNATURE(RELU1, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_ELEMENTWISE_SIGNATURE(TANH, V1_0, Type::TENSOR_FLOAT32);
-DEFINE_ELEMENTWISE_SIGNATURE(FLOOR, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE(LOGISTIC, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE(RELU, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE(RELU1, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_2, Type::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE(FLOOR, V1_0, TestOperandType::TENSOR_FLOAT32);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU1, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_ELEMENTWISE_SIGNATURE(TANH, V1_0, TestOperandType::TENSOR_FLOAT32);
+DEFINE_ELEMENTWISE_SIGNATURE(FLOOR, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE(LOGISTIC, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU1, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_2, TestOperandType::TENSOR_FLOAT16);
#define DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(op, ver, ...) \
DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op, \
@@ -49,16 +52,21 @@ DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_2, Type::TENSOR_FLOAT16);
.outputs = {OUTPUT_DEFAULT}, \
.constructor = sameShapeOpConstructor};
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(ABS, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(EXP, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(NEG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_INT32);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SIN, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOGICAL_NOT, V1_2, Type::TENSOR_BOOL8);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(ABS, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(EXP, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(NEG, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SIN, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOGICAL_NOT, V1_2, TestOperandType::TENSOR_BOOL8);
// LOG, SQRT, and RSQRT may produce NaN output values. We should not connect the output tensor to
// the input of another operation.
-static void elementwiseOpWithDisconnectedOutput(Type type, uint32_t rank, RandomOperation* op) {
+static void elementwiseOpWithDisconnectedOutput(TestOperandType type, uint32_t rank,
+ RandomOperation* op) {
sameShapeOpConstructor(type, rank, op);
op->outputs[0]->doNotConnect = true;
}
@@ -72,12 +80,12 @@ static void elementwiseOpWithDisconnectedOutput(Type type, uint32_t rank, Random
.outputs = {OUTPUT_DEFAULT}, \
.constructor = elementwiseOpWithDisconnectedOutput};
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(LOG, V1_2, Type::TENSOR_FLOAT32,
- Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(RSQRT, V1_2, Type::TENSOR_FLOAT32,
- Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(SQRT, V1_2, Type::TENSOR_FLOAT32,
- Type::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(LOG, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(RSQRT, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(SQRT, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
// Quantized operations with special output quantization parameters.
#define DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(op, ver, s, z, ...) \
@@ -90,52 +98,62 @@ DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(SQRT, V1_2, Type::TENSOR_F
.constructor = sameDimensionOpConstructor};
DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(LOGISTIC, V1_0, /*scale=*/1.f / 256, /*zeroPoint=*/0,
- Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
+ TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(TANH, V1_2, /*scale=*/1.f / 128, /*zeroPoint=*/128,
- Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM);
+ TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
// Operations with output data type different from input.
-#define DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(op, ver, outType, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##outType##_##ver){.opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT}, \
- .outputs = {OUTPUT_TYPED(Type::outType)}, \
- .constructor = sameDimensionOpConstructor};
+#define DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(op, ver, outType, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##outType##_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
+ .constructor = sameDimensionOpConstructor};
DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_0, /*outType=*/TENSOR_FLOAT32,
- Type::TENSOR_QUANT8_ASYMM);
+ TestOperandType::TENSOR_QUANT8_ASYMM);
DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT32,
- Type::TENSOR_QUANT8_SYMM);
+ TestOperandType::TENSOR_QUANT8_SYMM);
DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(DEQUANTIZE, V1_2, /*outType=*/TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_QUANT8_SYMM);
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_QUANT8_SYMM);
DEFINE_ELEMENTWISE_WITH_TYPED_OUTPUT_SIGNATURE(QUANTIZE, V1_2, /*outType=*/TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-
-#define DEFINE_CAST_SIGNATURE(ver, outType, ...) \
- DEFINE_OPERATION_SIGNATURE(CAST_##outType##_##ver){.opType = ANEURALNETWORKS_CAST, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4, 5}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT}, \
- .outputs = {OUTPUT_TYPED(Type::outType)}, \
- .constructor = sameDimensionOpConstructor};
-
-DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_FLOAT32, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_INT32);
-
-DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_FLOAT16, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_INT32);
-
-DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_QUANT8_ASYMM, Type::TENSOR_FLOAT32,
- Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_INT32);
-
-DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_INT32, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_INT32);
+ TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+
+#define DEFINE_CAST_SIGNATURE(ver, outType, ...) \
+ DEFINE_OPERATION_SIGNATURE(CAST_##outType##_##ver){ \
+ .opType = ANEURALNETWORKS_CAST, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4, 5}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::outType)}, \
+ .constructor = sameDimensionOpConstructor};
+
+DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_INT32);
+
+DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_FLOAT16, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_INT32);
+
+DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_INT32);
+
+DEFINE_CAST_SIGNATURE(V1_2, /*outType=*/TENSOR_INT32, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_INT32);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp b/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp
index 7832e987d..25e7d9e4a 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/FullyConnected.cpp
@@ -22,7 +22,8 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-static void fullyConnectedConstructor(Type, uint32_t rank, HalVersion ver, RandomOperation* op) {
+static void fullyConnectedConstructor(TestOperandType, uint32_t rank, HalVersion ver,
+ RandomOperation* op) {
// Inputs, flattened to [batch_size, input_size]
op->inputs[0]->dimensions.resize(rank);
RandomVariable numElements = 1;
@@ -46,21 +47,23 @@ static void fullyConnectedConstructor(Type, uint32_t rank, HalVersion ver, Rando
DEFINE_OPERATION_SIGNATURE(signature_FULLY_CONNECTED_V1_0){
.opType = ANEURALNETWORKS_FULLY_CONNECTED,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {2, 3, 4},
.version = HalVersion::V1_0,
.inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3)},
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3)},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(fullyConnectedConstructor, _1, _2, HalVersion::V1_0, _3)};
DEFINE_OPERATION_SIGNATURE(signature_FULLY_CONNECTED_V1_2){
.opType = ANEURALNETWORKS_FULLY_CONNECTED,
- .supportedDataTypes = {Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {2, 3, 4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT, INPUT_DEFAULT, INPUT_BIAS,
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3)},
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3)},
.outputs = {OUTPUT_DEFAULT},
.constructor = std::bind(fullyConnectedConstructor, _1, _2, HalVersion::V1_2, _3)};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
index 59dc0cae8..4ad8367c9 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
@@ -20,7 +20,7 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-static void softmaxConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
+static void softmaxConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
sameDimensionOpConstructor(dataType, rank, op);
// Generate value for "axis" parameter.
if (op->inputs.size() > 2) {
@@ -30,7 +30,8 @@ static void softmaxConstructor(Type dataType, uint32_t rank, RandomOperation* op
DEFINE_OPERATION_SIGNATURE(SOFTMAX_V1_0){
.opType = ANEURALNETWORKS_SOFTMAX,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {2, 4},
.version = HalVersion::V1_0,
.inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0)},
@@ -39,7 +40,8 @@ DEFINE_OPERATION_SIGNATURE(SOFTMAX_V1_0){
DEFINE_OPERATION_SIGNATURE(SOFTMAX_V1_2){
.opType = ANEURALNETWORKS_SOFTMAX,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 3},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0)},
@@ -48,7 +50,7 @@ DEFINE_OPERATION_SIGNATURE(SOFTMAX_V1_2){
DEFINE_OPERATION_SIGNATURE(SOFTMAX_float16_V1_2){
.opType = ANEURALNETWORKS_SOFTMAX,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0)},
@@ -57,15 +59,17 @@ DEFINE_OPERATION_SIGNATURE(SOFTMAX_float16_V1_2){
DEFINE_OPERATION_SIGNATURE(SOFTMAX_axis_V1_2){
.opType = ANEURALNETWORKS_SOFTMAX,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0), PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0),
+ PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_QUANT(1.f / 256, 0)},
.constructor = softmaxConstructor};
-static void l2normConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
+static void l2normConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
sameDimensionOpConstructor(dataType, rank, op);
// Generate value for "axis" parameter.
if (op->inputs.size() > 1) {
@@ -76,25 +80,28 @@ static void l2normConstructor(Type dataType, uint32_t rank, RandomOperation* op)
op->outputs[0]->doNotConnect = true;
}
-DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_0){.opType = ANEURALNETWORKS_L2_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32},
- .supportedRanks = {4},
- .version = HalVersion::V1_0,
- .inputs = {INPUT_DEFAULT},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = l2normConstructor};
-
-DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_2){.opType = ANEURALNETWORKS_L2_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32},
- .supportedRanks = {1, 2, 3},
- .version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = l2normConstructor};
+DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_0){
+ .opType = ANEURALNETWORKS_L2_NORMALIZATION,
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32},
+ .supportedRanks = {4},
+ .version = HalVersion::V1_0,
+ .inputs = {INPUT_DEFAULT},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = l2normConstructor};
+
+DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_2){
+ .opType = ANEURALNETWORKS_L2_NORMALIZATION,
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32},
+ .supportedRanks = {1, 2, 3},
+ .version = HalVersion::V1_2,
+ .inputs = {INPUT_DEFAULT},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = l2normConstructor};
DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_type_V1_2){
.opType = ANEURALNETWORKS_L2_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT},
@@ -103,15 +110,16 @@ DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_type_V1_2){
DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_axis_V1_2){
.opType = ANEURALNETWORKS_L2_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_QUANT(1.f / 128, 128)},
.constructor = l2normConstructor};
-static void localResponseNormConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
+static void localResponseNormConstructor(TestOperandType dataType, uint32_t rank,
+ RandomOperation* op) {
sameDimensionOpConstructor(dataType, rank, op);
// Generate value for "axis" parameter.
if (op->inputs.size() > 5) {
@@ -121,13 +129,13 @@ static void localResponseNormConstructor(Type dataType, uint32_t rank, RandomOpe
DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_V1_0){
.opType = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32},
.supportedRanks = {4},
.version = HalVersion::V1_0,
.inputs =
{
INPUT_DEFAULT,
- PARAMETER_RANGE(Type::INT32, 1, 10),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 10),
PARAMETER_FLOAT_RANGE(0.0, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 1.0),
@@ -137,13 +145,13 @@ DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_V1_0){
DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_V1_2){
.opType = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32},
.supportedRanks = {1, 2, 3},
.version = HalVersion::V1_2,
.inputs =
{
INPUT_DEFAULT,
- PARAMETER_RANGE(Type::INT32, 1, 10),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 10),
PARAMETER_FLOAT_RANGE(0.0, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 1.0),
@@ -153,13 +161,13 @@ DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_V1_2){
DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_float16_V1_2){
.opType = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
.inputs =
{
INPUT_DEFAULT,
- PARAMETER_RANGE(Type::INT32, 1, 10),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 10),
PARAMETER_FLOAT_RANGE(0.0, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 1.0),
@@ -169,24 +177,24 @@ DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_float16_V1_2){
DEFINE_OPERATION_SIGNATURE(LOCAL_RESPONSE_NORMALIZATION_axis_V1_2){
.opType = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
.inputs =
{
INPUT_DEFAULT,
- PARAMETER_RANGE(Type::INT32, 1, 10),
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 10),
PARAMETER_FLOAT_RANGE(0.0, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 10.0),
PARAMETER_FLOAT_RANGE(0.1, 1.0),
- PARAMETER_NONE(Type::INT32),
+ PARAMETER_NONE(TestOperandType::INT32),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = localResponseNormConstructor};
DEFINE_OPERATION_SIGNATURE(INSTANCE_NORMALIZATION_V1_2){
.opType = ANEURALNETWORKS_INSTANCE_NORMALIZATION,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs =
@@ -195,17 +203,18 @@ DEFINE_OPERATION_SIGNATURE(INSTANCE_NORMALIZATION_V1_2){
PARAMETER_FLOAT_RANGE(0.1, 10.0),
PARAMETER_FLOAT_RANGE(-10.0, 10.0),
PARAMETER_FLOAT_RANGE(0.01, 1.0),
- PARAMETER_CHOICE(Type::BOOL, true, false),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false),
},
.outputs = {OUTPUT_DEFAULT},
.constructor = sameShapeOpConstructor};
DEFINE_OPERATION_SIGNATURE(LOG_SOFTMAX_TENSOR_FLOAT32_V1_2){
.opType = ANEURALNETWORKS_LOG_SOFTMAX,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0), PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.1, 10.0),
+ PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = softmaxConstructor};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
index ca54c11cd..04cf4e6e1 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
+++ b/nn/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -18,9 +18,11 @@
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FUZZING_OPERATION_SIGNATURES_OPERATION_SIGNATURE_UTILS_H
#include <functional>
+#include <memory>
#include <string>
#include <vector>
+#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "fuzzing/OperationManager.h"
#include "fuzzing/RandomGraphGenerator.h"
@@ -32,55 +34,57 @@ namespace fuzzing_test {
namespace {
-// From Type to cpp type.
-template <Type type>
+using namespace test_helper;
+
+// From TestOperandType to cpp type.
+template <TestOperandType type>
struct CppType;
template <>
-struct CppType<Type::TENSOR_FLOAT32> {
+struct CppType<TestOperandType::TENSOR_FLOAT32> {
using type = float;
};
template <>
-struct CppType<Type::FLOAT32> {
+struct CppType<TestOperandType::FLOAT32> {
using type = float;
};
template <>
-struct CppType<Type::TENSOR_INT32> {
+struct CppType<TestOperandType::TENSOR_INT32> {
using type = int32_t;
};
template <>
-struct CppType<Type::INT32> {
+struct CppType<TestOperandType::INT32> {
using type = int32_t;
};
template <>
-struct CppType<Type::TENSOR_QUANT8_ASYMM> {
+struct CppType<TestOperandType::TENSOR_QUANT8_ASYMM> {
using type = uint8_t;
};
template <>
-struct CppType<Type::TENSOR_QUANT8_SYMM> {
+struct CppType<TestOperandType::TENSOR_QUANT8_SYMM> {
using type = int8_t;
};
template <>
-struct CppType<Type::TENSOR_QUANT16_ASYMM> {
+struct CppType<TestOperandType::TENSOR_QUANT16_ASYMM> {
using type = uint16_t;
};
template <>
-struct CppType<Type::TENSOR_QUANT16_SYMM> {
+struct CppType<TestOperandType::TENSOR_QUANT16_SYMM> {
using type = int16_t;
};
template <>
-struct CppType<Type::TENSOR_BOOL8> {
+struct CppType<TestOperandType::TENSOR_BOOL8> {
using type = bool8;
};
template <>
-struct CppType<Type::BOOL> {
+struct CppType<TestOperandType::BOOL> {
using type = bool8;
};
template <>
-struct CppType<Type::TENSOR_FLOAT16> {
+struct CppType<TestOperandType::TENSOR_FLOAT16> {
using type = _Float16;
};
template <>
-struct CppType<Type::FLOAT16> {
+struct CppType<TestOperandType::FLOAT16> {
using type = _Float16;
};
@@ -108,22 +112,22 @@ inline void uniform<bool8>(bool8, bool8, RandomOperand* op) {
// Dispatch to different generators by operand dataType.
inline void uniformFinalizer(RandomOperand* op) {
switch (op->dataType) {
- case Type::TENSOR_FLOAT32:
+ case TestOperandType::TENSOR_FLOAT32:
uniform<float>(kMinFloat32, kMaxFloat32, op);
break;
- case Type::TENSOR_INT32:
+ case TestOperandType::TENSOR_INT32:
uniform<int32_t>(0, 255, op);
break;
- case Type::TENSOR_QUANT8_ASYMM:
+ case TestOperandType::TENSOR_QUANT8_ASYMM:
uniform<uint8_t>(0, 255, op);
break;
- case Type::TENSOR_QUANT8_SYMM:
+ case TestOperandType::TENSOR_QUANT8_SYMM:
uniform<uint8_t>(-128, 127, op);
break;
- case Type::TENSOR_BOOL8:
+ case TestOperandType::TENSOR_BOOL8:
uniform<bool8>(true, false, op);
break;
- case Type::TENSOR_FLOAT16:
+ case TestOperandType::TENSOR_FLOAT16:
uniform<_Float16>(kMinFloat32, kMaxFloat32, op);
break;
default:
@@ -204,7 +208,7 @@ inline void setFreeDimensions(const std::shared_ptr<RandomOperand>& op, uint32_t
}
inline void setConvFCScale(bool applyOutputScaleBound, RandomOperation* op) {
- if (op->inputs[0]->dataType == Type::TENSOR_QUANT8_ASYMM) {
+ if (op->inputs[0]->dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
float biasScale = op->inputs[0]->scale * op->inputs[1]->scale;
op->inputs[2]->scale = biasScale;
if (applyOutputScaleBound) {
@@ -214,23 +218,23 @@ inline void setConvFCScale(bool applyOutputScaleBound, RandomOperation* op) {
}
// For ops with input0 and output0 of the same dimension.
-inline void sameDimensionOpConstructor(Type, uint32_t rank, RandomOperation* op) {
+inline void sameDimensionOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
op->outputs[0]->dimensions = op->inputs[0]->dimensions;
}
// For ops with input0 and output0 of the same shape including scale and zeroPoint.
-inline void sameShapeOpConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
+inline void sameShapeOpConstructor(TestOperandType dataType, uint32_t rank, RandomOperation* op) {
sameDimensionOpConstructor(dataType, rank, op);
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op) {
+inline void defaultOperandConstructor(TestOperandType dataType, uint32_t, RandomOperand* op) {
op->dataType = dataType;
- if (dataType == Type::TENSOR_QUANT8_ASYMM) {
+ if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) {
op->scale = getUniform<float>(0.1, 2.0);
op->zeroPoint = getUniform<int32_t>(0, 255);
- } else if (dataType == Type::TENSOR_QUANT8_SYMM) {
+ } else if (dataType == TestOperandType::TENSOR_QUANT8_SYMM) {
op->scale = getUniform<float>(0.1, 2.0);
op->zeroPoint = 0;
} else {
@@ -253,7 +257,7 @@ inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op
#define INPUT_TYPED(opType) \
{ \
.type = RandomOperandType::INPUT, \
- .constructor = [](Type, uint32_t rank, \
+ .constructor = [](TestOperandType, uint32_t rank, \
RandomOperand* op) { defaultOperandConstructor((opType), rank, op); }, \
.finalizer = uniformFinalizer \
}
@@ -262,17 +266,17 @@ inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op
// An INPUT operand with uniformly distributed buffer values. The operand's data type is set to
// TENSOR_INT32 if the operation's primary data type is TENSOR_QUANT8_ASYMM. Otherwise, it is the
// same as INPUT_DEFAULT.
-#define INPUT_BIAS \
- { \
- .type = RandomOperandType::INPUT, \
- .constructor = \
- [](Type dataType, uint32_t rank, RandomOperand* op) { \
- if (dataType == Type::TENSOR_QUANT8_ASYMM) { \
- dataType = Type::TENSOR_INT32; \
- } \
- defaultOperandConstructor(dataType, rank, op); \
- }, \
- .finalizer = uniformFinalizer \
+#define INPUT_BIAS \
+ { \
+ .type = RandomOperandType::INPUT, \
+ .constructor = \
+ [](TestOperandType dataType, uint32_t rank, RandomOperand* op) { \
+ if (dataType == TestOperandType::TENSOR_QUANT8_ASYMM) { \
+ dataType = TestOperandType::TENSOR_INT32; \
+ } \
+ defaultOperandConstructor(dataType, rank, op); \
+ }, \
+ .finalizer = uniformFinalizer \
}
// A helper macro for common code block filling operand buffer with random method.
@@ -291,11 +295,12 @@ inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op
}
// A 1-D vector of CONST parameters of length len, each uniformly selected within range [low, up].
-#define PARAMETER_VEC_RANGE(opType, len, low, up) \
- { \
- .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
- PARAMETER_FILL_BUFFER_HELPER(opType, len, getUniform, low, up); \
- } \
+#define PARAMETER_VEC_RANGE(opType, len, low, up) \
+ { \
+ .type = RandomOperandType::CONST, \
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { \
+ PARAMETER_FILL_BUFFER_HELPER(opType, len, getUniform, low, up); \
+ } \
}
// A CONST scalar uniformly selected within range [low, up].
@@ -304,51 +309,54 @@ inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op
// A CONST floating point scalar uniformly selected within range [low, up]. The operand's data type
// is set to FLOAT16 if the operation's primary data type is TENSOR_FLOAT16. Otherwise, the data
// type is set to FLOAT32.
-#define PARAMETER_FLOAT_RANGE(low, up) \
- { \
- .type = RandomOperandType::CONST, \
- .constructor = [](Type dataType, uint32_t, RandomOperand* op) { \
- if (dataType == Type::TENSOR_FLOAT16) { \
- PARAMETER_FILL_BUFFER_HELPER(Type::FLOAT16, 1, getUniform, low, up); \
- } else { \
- PARAMETER_FILL_BUFFER_HELPER(Type::FLOAT32, 1, getUniform, low, up); \
- } \
- } \
+#define PARAMETER_FLOAT_RANGE(low, up) \
+ { \
+ .type = RandomOperandType::CONST, \
+ .constructor = [](TestOperandType dataType, uint32_t, RandomOperand* op) { \
+ if (dataType == TestOperandType::TENSOR_FLOAT16) { \
+ PARAMETER_FILL_BUFFER_HELPER(TestOperandType::FLOAT16, 1, getUniform, low, up); \
+ } else { \
+ PARAMETER_FILL_BUFFER_HELPER(TestOperandType::FLOAT32, 1, getUniform, low, up); \
+ } \
+ } \
}
// A CONST scalar uniformly selected from the provided choices.
-#define PARAMETER_CHOICE(opType, ...) \
- { \
- .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
- const std::vector<CppType<opType>::type> choices = {__VA_ARGS__}; \
- PARAMETER_FILL_BUFFER_HELPER(opType, 1, getRandomChoice, choices); \
- } \
+#define PARAMETER_CHOICE(opType, ...) \
+ { \
+ .type = RandomOperandType::CONST, \
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { \
+ const std::vector<CppType<opType>::type> choices = {__VA_ARGS__}; \
+ PARAMETER_FILL_BUFFER_HELPER(opType, 1, getRandomChoice, choices); \
+ } \
}
// A CONST scalar with unintialized buffer value. The buffer values are expected to be filled in the
// operation constructor or finalizer.
-#define PARAMETER_NONE(opType) \
- { \
- .type = RandomOperandType::CONST, \
- .constructor = [](Type, uint32_t, RandomOperand* op) { op->dataType = opType; } \
+#define PARAMETER_NONE(opType) \
+ { \
+ .type = RandomOperandType::CONST, \
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { op->dataType = opType; } \
}
// A CONST integer scalar with value set as a FREE RandomVariable within default range.
-#define RANDOM_INT_FREE \
- { \
- .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
- op->dataType = Type::INT32; \
- op->randomBuffer = {RandomVariableType::FREE}; \
- } \
+#define RANDOM_INT_FREE \
+ { \
+ .type = RandomOperandType::CONST, \
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { \
+ op->dataType = TestOperandType::INT32; \
+ op->randomBuffer = {RandomVariableType::FREE}; \
+ } \
}
// A CONST integer scalar with value set as a FREE RandomVariable within range [low, up].
-#define RANDOM_INT_RANGE(low, up) \
- { \
- .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) { \
- op->dataType = Type::INT32; \
- op->randomBuffer = {RandomVariable((low), (up))}; \
- } \
+#define RANDOM_INT_RANGE(low, up) \
+ { \
+ .type = RandomOperandType::CONST, \
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) { \
+ op->dataType = TestOperandType::INT32; \
+ op->randomBuffer = {RandomVariable((low), (up))}; \
+ } \
}
// An OUTPUT operand with data type set the same as the operation primary data type. In the case of
@@ -358,27 +366,27 @@ inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op
// An OUTPUT operand with a specified data type. In the case of quantized data type, the
// quantization parameters are chosen randomly and uniformly.
-#define OUTPUT_TYPED(opType) \
- { \
- .type = RandomOperandType::OUTPUT, \
- .constructor = [](Type, uint32_t rank, RandomOperand* op) { \
- defaultOperandConstructor((opType), rank, op); \
- } \
+#define OUTPUT_TYPED(opType) \
+ { \
+ .type = RandomOperandType::OUTPUT, \
+ .constructor = [](TestOperandType, uint32_t rank, RandomOperand* op) { \
+ defaultOperandConstructor((opType), rank, op); \
+ } \
}
// An OUTPUT operand with data type set the same as the operation primary data type. In the case of
// quantized data type, the quantization parameters are set to the specified values.
-#define OUTPUT_QUANT(fixedScale, fixedZeroPoint) \
- { \
- .type = RandomOperandType::OUTPUT, \
- .constructor = [](Type dataType, uint32_t rank, RandomOperand* op) { \
- defaultOperandConstructor(dataType, rank, op); \
- if (op->dataType == Type::TENSOR_QUANT8_ASYMM || \
- dataType == Type::TENSOR_QUANT8_SYMM) { \
- op->scale = (fixedScale); \
- op->zeroPoint = (fixedZeroPoint); \
- } \
- } \
+#define OUTPUT_QUANT(fixedScale, fixedZeroPoint) \
+ { \
+ .type = RandomOperandType::OUTPUT, \
+ .constructor = [](TestOperandType dataType, uint32_t rank, RandomOperand* op) { \
+ defaultOperandConstructor(dataType, rank, op); \
+ if (op->dataType == TestOperandType::TENSOR_QUANT8_ASYMM || \
+ dataType == TestOperandType::TENSOR_QUANT8_SYMM) { \
+ op->scale = (fixedScale); \
+ op->zeroPoint = (fixedZeroPoint); \
+ } \
+ } \
}
// DEFINE_OPERATION_SIGNATURE creates a OperationSignature by aggregate initialization and adds it
@@ -390,12 +398,10 @@ inline void defaultOperandConstructor(Type dataType, uint32_t, RandomOperand* op
// Example:
// DEFINE_OPERATION_SIGNATURE(RELU_V1_0) {
// .opType = ANEURALNETWORKS_RELU,
-// .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
-// .supportedRanks = {1, 2, 3, 4},
-// .version = HalVersion::V1_0,
-// .inputs = {INPUT_DEFAULT},
-// .outputs = {OUTPUT_DEFAULT},
-// .constructor = sameShapeOpConstructor};
+// .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+// TestOperandType::TENSOR_QUANT8_ASYMM}, .supportedRanks = {1, 2, 3, 4}, .version =
+// HalVersion::V1_0, .inputs = {INPUT_DEFAULT}, .outputs = {OUTPUT_DEFAULT}, .constructor =
+// sameShapeOpConstructor};
//
#define DEFINE_OPERATION_SIGNATURE(name) \
const int dummy_##name = OperationSignatureHelper(#name) + OperationSignature
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp b/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
index 56a721439..b7ce6b79d 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
@@ -21,7 +21,7 @@ namespace nn {
namespace fuzzing_test {
// For pooling ops with explicit padding.
-static void poolingExplicitOpConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void poolingExplicitOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -63,7 +63,7 @@ static void poolingExplicitOpConstructor(Type, uint32_t rank, RandomOperation* o
}
// For pooling ops with implicit padding.
-static void poolingImplicitOpConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void poolingImplicitOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
// Parameters
@@ -98,99 +98,104 @@ static void poolingImplicitOpConstructor(Type, uint32_t rank, RandomOperation* o
setSameQuantization(op->outputs[0], op->inputs[0]);
}
-#define DEFINE_POOLING_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_explicit_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {4}, \
- .version = HalVersion::ver, \
- .inputs = \
- { \
- INPUT_DEFAULT, \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- RANDOM_INT_RANGE(1, 4), \
- RANDOM_INT_RANGE(1, 4), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
- }, \
- .outputs = {OUTPUT_DEFAULT}, \
- .constructor = poolingExplicitOpConstructor}; \
- DEFINE_OPERATION_SIGNATURE(op##_implicit_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {4}, \
- .version = HalVersion::ver, \
- .inputs = \
- { \
- INPUT_DEFAULT, \
- PARAMETER_CHOICE(Type::INT32, 1, 2), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- RANDOM_INT_RANGE(1, 4), \
- RANDOM_INT_RANGE(1, 4), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
- }, \
- .outputs = {OUTPUT_DEFAULT}, \
+#define DEFINE_POOLING_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_explicit_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = HalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 4), \
+ RANDOM_INT_RANGE(1, 4), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = poolingExplicitOpConstructor}; \
+ DEFINE_OPERATION_SIGNATURE(op##_implicit_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = HalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 4), \
+ RANDOM_INT_RANGE(1, 4), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
.constructor = poolingImplicitOpConstructor};
-DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_0, Type::TENSOR_FLOAT32);
-DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-
-DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_2, Type::TENSOR_FLOAT16);
-DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_2, Type::TENSOR_FLOAT16);
-
-#define DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_explicit_layout_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {4}, \
- .version = HalVersion::ver, \
- .inputs = \
- { \
- INPUT_DEFAULT, \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- RANDOM_INT_RANGE(1, 4), \
- RANDOM_INT_RANGE(1, 4), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
- PARAMETER_CHOICE(Type::BOOL, true, false), \
- }, \
- .outputs = {OUTPUT_DEFAULT}, \
- .constructor = poolingExplicitOpConstructor}; \
- DEFINE_OPERATION_SIGNATURE(op##_implicit_layout_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {4}, \
- .version = HalVersion::ver, \
- .inputs = \
- { \
- INPUT_DEFAULT, \
- PARAMETER_CHOICE(Type::INT32, 1, 2), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- PARAMETER_RANGE(Type::INT32, 1, 3), \
- RANDOM_INT_RANGE(1, 4), \
- RANDOM_INT_RANGE(1, 4), \
- PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
- PARAMETER_CHOICE(Type::BOOL, true, false), \
- }, \
- .outputs = {OUTPUT_DEFAULT}, \
+DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_0, TestOperandType::TENSOR_FLOAT32);
+DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_0, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+
+DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT16);
+DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT16);
+
+#define DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_explicit_layout_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = HalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 4), \
+ RANDOM_INT_RANGE(1, 4), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = poolingExplicitOpConstructor}; \
+ DEFINE_OPERATION_SIGNATURE(op##_implicit_layout_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = HalVersion::ver, \
+ .inputs = \
+ { \
+ INPUT_DEFAULT, \
+ PARAMETER_CHOICE(TestOperandType::INT32, 1, 2), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ PARAMETER_RANGE(TestOperandType::INT32, 1, 3), \
+ RANDOM_INT_RANGE(1, 4), \
+ RANDOM_INT_RANGE(1, 4), \
+ PARAMETER_CHOICE(TestOperandType::INT32, 0, 1, 2, 3), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false), \
+ }, \
+ .outputs = {OUTPUT_DEFAULT}, \
.constructor = poolingImplicitOpConstructor};
-DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(AVERAGE_POOL_2D, V1_2, Type::TENSOR_FLOAT32,
- Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(L2_POOL_2D, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM);
+DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(AVERAGE_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(L2_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_QUANT8_ASYMM);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
index 8d3a380df..937fab8b0 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
@@ -20,7 +20,7 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-static void reduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void reduceOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
// A boolean array indicating whether each dimension is selected to be reduced.
@@ -38,7 +38,7 @@ static void reduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
// This scalar may have two types: in MEAN it is INT32, in REDUCE_* it is BOOL
bool keepDims;
- if (op->inputs[2]->dataType == Type::BOOL) {
+ if (op->inputs[2]->dataType == TestOperandType::BOOL) {
keepDims = op->inputs[2]->value<bool8>();
} else {
keepDims = op->inputs[2]->value<int32_t>() > 0;
@@ -60,41 +60,43 @@ static void reduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
}
}
-#define DEFINE_MEAN_SIGNATURE(ver, ...) \
- DEFINE_OPERATION_SIGNATURE(MEAN_##ver){ \
- .opType = ANEURALNETWORKS_MEAN, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32), \
- PARAMETER_CHOICE(Type::INT32, -100, 100)}, \
- .outputs = {OUTPUT_DEFAULT}, \
+#define DEFINE_MEAN_SIGNATURE(ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(MEAN_##ver){ \
+ .opType = ANEURALNETWORKS_MEAN, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ PARAMETER_CHOICE(TestOperandType::INT32, -100, 100)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
.constructor = reduceOpConstructor};
-DEFINE_MEAN_SIGNATURE(V1_1, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
-DEFINE_MEAN_SIGNATURE(V1_2, Type::TENSOR_FLOAT16);
-
-#define DEFINE_REDUCE_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {1, 2, 3, 4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32), \
- PARAMETER_CHOICE(Type::BOOL, true, false)}, \
- .outputs = {OUTPUT_DEFAULT}, \
+DEFINE_MEAN_SIGNATURE(V1_1, TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_MEAN_SIGNATURE(V1_2, TestOperandType::TENSOR_FLOAT16);
+
+#define DEFINE_REDUCE_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
.constructor = reduceOpConstructor};
-DEFINE_REDUCE_SIGNATURE(REDUCE_ALL, V1_2, Type::TENSOR_BOOL8);
-DEFINE_REDUCE_SIGNATURE(REDUCE_ANY, V1_2, Type::TENSOR_BOOL8);
-DEFINE_REDUCE_SIGNATURE(REDUCE_PROD, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_REDUCE_SIGNATURE(REDUCE_SUM, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_REDUCE_SIGNATURE(REDUCE_MAX, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM);
-DEFINE_REDUCE_SIGNATURE(REDUCE_MIN, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
- Type::TENSOR_QUANT8_ASYMM);
-
-static void singleAxisReduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
+DEFINE_REDUCE_SIGNATURE(REDUCE_ALL, V1_2, TestOperandType::TENSOR_BOOL8);
+DEFINE_REDUCE_SIGNATURE(REDUCE_ANY, V1_2, TestOperandType::TENSOR_BOOL8);
+DEFINE_REDUCE_SIGNATURE(REDUCE_PROD, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_REDUCE_SIGNATURE(REDUCE_SUM, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16);
+DEFINE_REDUCE_SIGNATURE(REDUCE_MAX, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM);
+DEFINE_REDUCE_SIGNATURE(REDUCE_MIN, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_QUANT8_ASYMM);
+
+static void singleAxisReduceOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
// "axis" must be in the range [-rank, rank).
// Negative "axis" is used to specify axis from the end.
@@ -110,12 +112,13 @@ static void singleAxisReduceOpConstructor(Type, uint32_t rank, RandomOperation*
#define DEFINE_ARGMIN_MAX_SIGNATURE(op, ver, ...) \
DEFINE_OPERATION_SIGNATURE(op##_##ver){ \
.opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32, \
- Type::TENSOR_QUANT8_ASYMM}, \
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, \
+ TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_INT32, \
+ TestOperandType::TENSOR_QUANT8_ASYMM}, \
.supportedRanks = {1, 2, 3, 4, 5}, \
.version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)}, \
- .outputs = {OUTPUT_TYPED(Type::TENSOR_INT32)}, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)}, \
+ .outputs = {OUTPUT_TYPED(TestOperandType::TENSOR_INT32)}, \
.constructor = singleAxisReduceOpConstructor};
DEFINE_ARGMIN_MAX_SIGNATURE(ARGMAX, V1_2);
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp
index 5963fac30..24fef4da6 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Reshape.cpp
@@ -14,13 +14,16 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <vector>
+
#include "fuzzing/operation_signatures/OperationSignatureUtils.h"
namespace android {
namespace nn {
namespace fuzzing_test {
-static void spaceToDepthConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void spaceToDepthConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
bool useNchw = false;
@@ -46,34 +49,36 @@ static void spaceToDepthConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_V1_0){
.opType = ANEURALNETWORKS_SPACE_TO_DEPTH,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 5)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)},
.outputs = {OUTPUT_DEFAULT},
.constructor = spaceToDepthConstructor};
DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_V1_2){
.opType = ANEURALNETWORKS_SPACE_TO_DEPTH,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 5)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)},
.outputs = {OUTPUT_DEFAULT},
.constructor = spaceToDepthConstructor};
DEFINE_OPERATION_SIGNATURE(SPACE_TO_DEPTH_layout_V1_2){
.opType = ANEURALNETWORKS_SPACE_TO_DEPTH,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 5),
- PARAMETER_CHOICE(Type::BOOL, true, false)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
.outputs = {OUTPUT_DEFAULT},
.constructor = spaceToDepthConstructor};
-static void depthToSpaceConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void depthToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
bool useNchw = false;
@@ -99,34 +104,36 @@ static void depthToSpaceConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_V1_0){
.opType = ANEURALNETWORKS_DEPTH_TO_SPACE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 3)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3)},
.outputs = {OUTPUT_DEFAULT},
.constructor = depthToSpaceConstructor};
DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_V1_2){
.opType = ANEURALNETWORKS_DEPTH_TO_SPACE,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 3)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3)},
.outputs = {OUTPUT_DEFAULT},
.constructor = depthToSpaceConstructor};
DEFINE_OPERATION_SIGNATURE(DEPTH_TO_SPACE_layout_V1_2){
.opType = ANEURALNETWORKS_DEPTH_TO_SPACE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
.outputs = {OUTPUT_DEFAULT},
.constructor = depthToSpaceConstructor};
-static void reshapeConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void reshapeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
op->inputs[1]->dimensions = {rank};
op->inputs[1]->randomBuffer.resize(rank);
@@ -144,23 +151,24 @@ static void reshapeConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(RESHAPE_V1_0){
.opType = ANEURALNETWORKS_RESHAPE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_0,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = reshapeConstructor};
DEFINE_OPERATION_SIGNATURE(RESHAPE_V1_2){
.opType = ANEURALNETWORKS_RESHAPE,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = reshapeConstructor};
-static void batchToSpaceConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void batchToSpaceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
bool useNchw = false;
@@ -186,37 +194,39 @@ static void batchToSpaceConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_V1_1){
.opType = ANEURALNETWORKS_BATCH_TO_SPACE_ND,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_1,
.inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(Type::TENSOR_INT32, /*len=*/2, /*range=*/1, 3)},
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 3)},
.outputs = {OUTPUT_DEFAULT},
.constructor = batchToSpaceConstructor};
DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_V1_2){
.opType = ANEURALNETWORKS_BATCH_TO_SPACE_ND,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(Type::TENSOR_INT32, /*len=*/2, /*range=*/1, 3)},
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 3)},
.outputs = {OUTPUT_DEFAULT},
.constructor = batchToSpaceConstructor};
DEFINE_OPERATION_SIGNATURE(BATCH_TO_SPACE_ND_layout_V1_2){
.opType = ANEURALNETWORKS_BATCH_TO_SPACE_ND,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(Type::TENSOR_INT32, /*len=*/2, /*range=*/1, 3),
- PARAMETER_CHOICE(Type::BOOL, true, false)},
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 3),
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
.outputs = {OUTPUT_DEFAULT},
.constructor = batchToSpaceConstructor};
-static void spaceToBatchConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void spaceToBatchConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
bool useNchw = false;
@@ -250,8 +260,9 @@ static void spaceToBatchConstructor(Type, uint32_t rank, RandomOperation* op) {
// The paddings tensor in SPACE_TOBATCH_ND, a [2, 2] tensor with value selected from [0, 10].
static const OperandSignature paddingTensor_SPACE_TO_BATCH_ND = {
- .type = RandomOperandType::CONST, .constructor = [](Type, uint32_t, RandomOperand* op) {
- op->dataType = Type::TENSOR_INT32;
+ .type = RandomOperandType::CONST,
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {
+ op->dataType = TestOperandType::TENSOR_INT32;
op->dimensions = {2, 2};
op->resizeBuffer<int32_t>(4);
for (int i = 0; i < 4; i++) op->value<int32_t>(i) = getUniform<int32_t>(0, 10);
@@ -259,39 +270,42 @@ static const OperandSignature paddingTensor_SPACE_TO_BATCH_ND = {
DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_V1_1){
.opType = ANEURALNETWORKS_SPACE_TO_BATCH_ND,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {4},
.version = HalVersion::V1_1,
.inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(Type::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
paddingTensor_SPACE_TO_BATCH_ND},
.outputs = {OUTPUT_DEFAULT},
.constructor = spaceToBatchConstructor};
DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_V1_2){
.opType = ANEURALNETWORKS_SPACE_TO_BATCH_ND,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(Type::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
paddingTensor_SPACE_TO_BATCH_ND},
.outputs = {OUTPUT_DEFAULT},
.constructor = spaceToBatchConstructor};
DEFINE_OPERATION_SIGNATURE(SPACE_TO_BATCH_ND_layout_V1_2){
.opType = ANEURALNETWORKS_SPACE_TO_BATCH_ND,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT,
- PARAMETER_VEC_RANGE(Type::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
- paddingTensor_SPACE_TO_BATCH_ND, PARAMETER_CHOICE(Type::BOOL, true, false)},
+ PARAMETER_VEC_RANGE(TestOperandType::TENSOR_INT32, /*len=*/2, /*range=*/1, 5),
+ paddingTensor_SPACE_TO_BATCH_ND,
+ PARAMETER_CHOICE(TestOperandType::BOOL, true, false)},
.outputs = {OUTPUT_DEFAULT},
.constructor = spaceToBatchConstructor};
-static void padConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void padConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
op->inputs[1]->dimensions = {rank, 2};
op->inputs[1]->resizeBuffer<int32_t>(rank * 2);
@@ -307,18 +321,18 @@ static void padConstructor(Type, uint32_t rank, RandomOperation* op) {
static const OperandSignature paddingScalar_PAD_V2 = {
.type = RandomOperandType::CONST,
- .constructor = [](Type dataType, uint32_t, RandomOperand* op) {
+ .constructor = [](TestOperandType dataType, uint32_t, RandomOperand* op) {
switch (dataType) {
- case Type::TENSOR_FLOAT32:
- op->dataType = Type::FLOAT32;
+ case TestOperandType::TENSOR_FLOAT32:
+ op->dataType = TestOperandType::FLOAT32;
op->setScalarValue<float>(getUniform<float>(-10.0f, 10.0f));
break;
- case Type::TENSOR_FLOAT16:
- op->dataType = Type::FLOAT16;
+ case TestOperandType::TENSOR_FLOAT16:
+ op->dataType = TestOperandType::FLOAT16;
op->setScalarValue<_Float16>(getUniform<_Float16>(-10.0f, 10.0f));
break;
- case Type::TENSOR_QUANT8_ASYMM:
- op->dataType = Type::INT32;
+ case TestOperandType::TENSOR_QUANT8_ASYMM:
+ op->dataType = TestOperandType::INT32;
op->setScalarValue<int32_t>(getUniform<int32_t>(0, 255));
break;
default:
@@ -328,32 +342,36 @@ static const OperandSignature paddingScalar_PAD_V2 = {
DEFINE_OPERATION_SIGNATURE(PAD_V1_1){
.opType = ANEURALNETWORKS_PAD,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = padConstructor};
-DEFINE_OPERATION_SIGNATURE(PAD_V1_2){.opType = ANEURALNETWORKS_PAD,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
- .supportedRanks = {1, 2, 3, 4},
- .version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
- .outputs = {OUTPUT_DEFAULT},
- .constructor = padConstructor};
+DEFINE_OPERATION_SIGNATURE(PAD_V1_2){
+ .opType = ANEURALNETWORKS_PAD,
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
+ .supportedRanks = {1, 2, 3, 4},
+ .version = HalVersion::V1_2,
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
+ .outputs = {OUTPUT_DEFAULT},
+ .constructor = padConstructor};
DEFINE_OPERATION_SIGNATURE(PAD_V2_V1_2){
.opType = ANEURALNETWORKS_PAD_V2,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32), paddingScalar_PAD_V2},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ paddingScalar_PAD_V2},
.outputs = {OUTPUT_DEFAULT},
.constructor = padConstructor};
-static void transposeConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void transposeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// Create the permutation value by randomly shuffling a sequential array.
std::vector<int32_t> permutation(rank);
std::iota(permutation.begin(), permutation.end(), 0);
@@ -374,23 +392,25 @@ static void transposeConstructor(Type, uint32_t rank, RandomOperation* op) {
// TODO: Test the case when the second input is omitted.
DEFINE_OPERATION_SIGNATURE(TRANSPOSE_V1_1){
.opType = ANEURALNETWORKS_TRANSPOSE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = transposeConstructor};
DEFINE_OPERATION_SIGNATURE(TRANSPOSE_V1_2){
.opType = ANEURALNETWORKS_TRANSPOSE,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = transposeConstructor};
-static void channelShuffleConstructor(Type dataType, uint32_t rank, RandomOperation* op) {
+static void channelShuffleConstructor(TestOperandType dataType, uint32_t rank,
+ RandomOperation* op) {
sameShapeOpConstructor(dataType, rank, op);
// The number of groups must be a divisor of the target axis size.
int32_t axis = getUniform<int32_t>(-rank, rank - 1);
@@ -402,15 +422,17 @@ static void channelShuffleConstructor(Type dataType, uint32_t rank, RandomOperat
DEFINE_OPERATION_SIGNATURE(CHANNEL_SHUFFLE_V1_2){
.opType = ANEURALNETWORKS_CHANNEL_SHUFFLE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(Type::INT32, 1, 5), PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5),
+ PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = channelShuffleConstructor};
-static void squeezeConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void squeezeConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// A boolean array indicating whether each dimension is selected to be squeezed.
bool squeeze[4] = {false, false, false, false};
uint32_t numAxis = getUniform<int32_t>(1, 10);
@@ -438,23 +460,24 @@ static void squeezeConstructor(Type, uint32_t rank, RandomOperation* op) {
// TODO: Test the case when the second input is omitted.
DEFINE_OPERATION_SIGNATURE(SQUEEZE_V1_1){
.opType = ANEURALNETWORKS_SQUEEZE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = squeezeConstructor};
DEFINE_OPERATION_SIGNATURE(SQUEEZE_V1_2){
.opType = ANEURALNETWORKS_SQUEEZE,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = squeezeConstructor};
-static void expandDimsConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void expandDimsConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// Generate values for the "axis" tensor.
int32_t axis = getUniform<int32_t>(-rank - 1, rank);
op->inputs[1]->setScalarValue<int32_t>(axis);
@@ -473,15 +496,15 @@ static void expandDimsConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(EXPAND_DIMS_V1_2){
.opType = ANEURALNETWORKS_EXPAND_DIMS,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4, 5},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = expandDimsConstructor};
-static void tileConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void tileConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
op->outputs[0]->dimensions.resize(rank);
op->inputs[1]->dimensions = {rank};
@@ -496,11 +519,11 @@ static void tileConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(TILE_V1_2){
.opType = ANEURALNETWORKS_TILE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4, 5},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = tileConstructor};
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp b/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp
index b2d053151..3e48d96f8 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Resize.cpp
@@ -20,7 +20,7 @@ namespace android {
namespace nn {
namespace fuzzing_test {
-static void resizeOpConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void resizeOpConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
NN_FUZZER_CHECK(rank == 4);
setFreeDimensions(op->inputs[0], rank);
@@ -32,16 +32,16 @@ static void resizeOpConstructor(Type, uint32_t rank, RandomOperation* op) {
RandomVariable outHeight, outWidth;
switch (op->inputs[1]->dataType) {
// Resize by shape.
- case Type::INT32:
+ case TestOperandType::INT32:
outWidth = op->inputs[1]->value<RandomVariable>();
outHeight = op->inputs[2]->value<RandomVariable>();
break;
// Resize by scale.
- case Type::FLOAT32:
+ case TestOperandType::FLOAT32:
outWidth = op->inputs[0]->dimensions[widthIndex] * op->inputs[1]->value<float>();
outHeight = op->inputs[0]->dimensions[heightIndex] * op->inputs[2]->value<float>();
break;
- case Type::FLOAT16:
+ case TestOperandType::FLOAT16:
outWidth = op->inputs[0]->dimensions[widthIndex] *
static_cast<float>(op->inputs[1]->value<_Float16>());
outHeight = op->inputs[0]->dimensions[heightIndex] *
@@ -71,35 +71,35 @@ static void resizeOpConstructor(Type, uint32_t rank, RandomOperation* op) {
.outputs = {OUTPUT_DEFAULT}, \
.constructor = resizeOpConstructor};
-DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_0, Type::TENSOR_FLOAT32);
-DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_2, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16);
+DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_0, TestOperandType::TENSOR_FLOAT32);
+DEFINE_RESIZE_WITHOUT_LAYOUT_SIGNATURE(RESIZE_BILINEAR, V1_2, TestOperandType::TENSOR_QUANT8_ASYMM,
+ TestOperandType::TENSOR_FLOAT16);
-#define DEFINE_RESIZE_OP_SIGNATURE(op, ver, ...) \
- DEFINE_OPERATION_SIGNATURE(op##_shape_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, RANDOM_INT_FREE, RANDOM_INT_FREE, \
- PARAMETER_CHOICE(Type::BOOL, false, true)}, \
- .outputs = {OUTPUT_DEFAULT}, \
- .constructor = resizeOpConstructor}; \
- DEFINE_OPERATION_SIGNATURE(op##_scale_##ver){ \
- .opType = ANEURALNETWORKS_##op, \
- .supportedDataTypes = {__VA_ARGS__}, \
- .supportedRanks = {4}, \
- .version = HalVersion::ver, \
- .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.2, 4.0), \
- PARAMETER_FLOAT_RANGE(0.2, 4.0), \
- PARAMETER_CHOICE(Type::BOOL, false, true)}, \
- .outputs = {OUTPUT_DEFAULT}, \
+#define DEFINE_RESIZE_OP_SIGNATURE(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_shape_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, RANDOM_INT_FREE, RANDOM_INT_FREE, \
+ PARAMETER_CHOICE(TestOperandType::BOOL, false, true)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = resizeOpConstructor}; \
+ DEFINE_OPERATION_SIGNATURE(op##_scale_##ver){ \
+ .opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {4}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT, PARAMETER_FLOAT_RANGE(0.2, 4.0), \
+ PARAMETER_FLOAT_RANGE(0.2, 4.0), \
+ PARAMETER_CHOICE(TestOperandType::BOOL, false, true)}, \
+ .outputs = {OUTPUT_DEFAULT}, \
.constructor = resizeOpConstructor};
-DEFINE_RESIZE_OP_SIGNATURE(RESIZE_BILINEAR, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM,
- Type::TENSOR_FLOAT16);
-DEFINE_RESIZE_OP_SIGNATURE(RESIZE_NEAREST_NEIGHBOR, V1_2, Type::TENSOR_FLOAT32,
- Type::TENSOR_QUANT8_ASYMM, Type::TENSOR_FLOAT16);
+DEFINE_RESIZE_OP_SIGNATURE(RESIZE_BILINEAR, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_FLOAT16);
+DEFINE_RESIZE_OP_SIGNATURE(RESIZE_NEAREST_NEIGHBOR, V1_2, TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_FLOAT16);
} // namespace fuzzing_test
} // namespace nn
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
index 432a48886..94805673f 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
@@ -14,13 +14,17 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <utility>
+#include <vector>
+
#include "fuzzing/operation_signatures/OperationSignatureUtils.h"
namespace android {
namespace nn {
namespace fuzzing_test {
-static void embeddingLookupConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void embeddingLookupConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], /*rank=*/1);
setFreeDimensions(op->inputs[1], rank);
op->outputs[0]->dimensions.resize(rank);
@@ -42,15 +46,16 @@ static void embeddingLookupFinalizer(RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(EMBEDDING_LOOKUP_V1_0){
.opType = ANEURALNETWORKS_EMBEDDING_LOOKUP,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {2, 3, 4},
.version = HalVersion::V1_0,
- .inputs = {PARAMETER_NONE(Type::TENSOR_INT32), INPUT_DEFAULT},
+ .inputs = {PARAMETER_NONE(TestOperandType::TENSOR_INT32), INPUT_DEFAULT},
.outputs = {OUTPUT_DEFAULT},
.constructor = embeddingLookupConstructor,
.finalizer = embeddingLookupFinalizer};
-static void hashtableLookupConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void hashtableLookupConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
op->inputs[0]->dimensions = {RandomVariableType::FREE};
op->inputs[1]->dimensions = {RandomVariableType::FREE};
op->inputs[2]->dimensions.resize(rank);
@@ -82,24 +87,26 @@ static void hashtableLookupFinalizer(RandomOperation* op) {
// The hits tensor in HASHTABLE_LOOKUP.
static const OperandSignature hitsTensor_HASHTABLE_LOOKUP = {
- .type = RandomOperandType::OUTPUT, .constructor = [](Type, uint32_t, RandomOperand* op) {
- op->dataType = Type::TENSOR_QUANT8_ASYMM;
+ .type = RandomOperandType::OUTPUT,
+ .constructor = [](TestOperandType, uint32_t, RandomOperand* op) {
+ op->dataType = TestOperandType::TENSOR_QUANT8_ASYMM;
op->scale = 1.0f;
op->zeroPoint = 0;
}};
DEFINE_OPERATION_SIGNATURE(HASHTABLE_LOOKUP_V1_0){
.opType = ANEURALNETWORKS_HASHTABLE_LOOKUP,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_INT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {2, 3, 4},
.version = HalVersion::V1_0,
- .inputs = {PARAMETER_NONE(Type::TENSOR_INT32), PARAMETER_NONE(Type::TENSOR_INT32),
- INPUT_DEFAULT},
+ .inputs = {PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32), INPUT_DEFAULT},
.outputs = {OUTPUT_DEFAULT, hitsTensor_HASHTABLE_LOOKUP},
.constructor = hashtableLookupConstructor,
.finalizer = hashtableLookupFinalizer};
-static void gatherConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void gatherConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
// Generate value for "axis" scalar.
int32_t axis = getUniform<int32_t>(-rank, rank - 1);
op->inputs[1]->setScalarValue<int32_t>(axis);
@@ -135,16 +142,17 @@ static void gatherFinalizer(RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(GATHER_V1_2){
.opType = ANEURALNETWORKS_GATHER,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4, 5},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32), PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = gatherConstructor,
.finalizer = gatherFinalizer};
-static void selectConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void selectConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
op->inputs[1]->dimensions = op->inputs[0]->dimensions;
op->inputs[2]->dimensions = op->inputs[0]->dimensions;
@@ -155,15 +163,15 @@ static void selectConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(SELECT_V1_2){
.opType = ANEURALNETWORKS_SELECT,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_TYPED(Type::TENSOR_BOOL8), INPUT_DEFAULT, INPUT_DEFAULT},
+ .inputs = {INPUT_TYPED(TestOperandType::TENSOR_BOOL8), INPUT_DEFAULT, INPUT_DEFAULT},
.outputs = {OUTPUT_DEFAULT},
.constructor = selectConstructor};
-static void topKConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void topKConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
setFreeDimensions(op->inputs[0], rank);
op->outputs[0]->dimensions.resize(rank);
op->outputs[1]->dimensions.resize(rank);
@@ -188,15 +196,15 @@ static void topKConstructor(Type, uint32_t rank, RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(TOPK_V2_V1_2){
.opType = ANEURALNETWORKS_TOPK_V2,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
.inputs = {INPUT_DEFAULT, RANDOM_INT_FREE},
- .outputs = {OUTPUT_DEFAULT, OUTPUT_TYPED(Type::TENSOR_INT32)},
+ .outputs = {OUTPUT_DEFAULT, OUTPUT_TYPED(TestOperandType::TENSOR_INT32)},
.constructor = topKConstructor};
-static void sliceConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void sliceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
op->inputs[1]->dimensions = {rank};
op->inputs[2]->dimensions = {rank};
setFreeDimensions(op->inputs[0], rank);
@@ -223,12 +231,12 @@ static void sliceFinalizer(RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(SLICE_V1_2){
.opType = ANEURALNETWORKS_SLICE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
- Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16,
+ TestOperandType::TENSOR_INT32, TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32),
- PARAMETER_NONE(Type::TENSOR_INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = sliceConstructor,
.finalizer = sliceFinalizer};
@@ -242,7 +250,7 @@ inline int32_t convertToBitMask(const std::vector<bool>& flags) {
return mask;
}
-static void stridedSliceConstructor(Type, uint32_t rank, RandomOperation* op) {
+static void stridedSliceConstructor(TestOperandType, uint32_t rank, RandomOperation* op) {
op->inputs[1]->dimensions = {rank};
op->inputs[2]->dimensions = {rank};
op->inputs[3]->dimensions = {rank};
@@ -314,26 +322,30 @@ static void stridedSliceFinalizer(RandomOperation* op) {
DEFINE_OPERATION_SIGNATURE(STRIDED_SLICE_V1_1){
.opType = ANEURALNETWORKS_STRIDED_SLICE,
- .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT32,
+ TestOperandType::TENSOR_QUANT8_ASYMM},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_1,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32),
- PARAMETER_NONE(Type::TENSOR_INT32), PARAMETER_NONE(Type::TENSOR_INT32),
- PARAMETER_CHOICE(Type::INT32, 0), PARAMETER_CHOICE(Type::INT32, 0),
- PARAMETER_CHOICE(Type::INT32, 0)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0),
+ PARAMETER_CHOICE(TestOperandType::INT32, 0)},
.outputs = {OUTPUT_DEFAULT},
.constructor = stridedSliceConstructor,
.finalizer = stridedSliceFinalizer};
DEFINE_OPERATION_SIGNATURE(STRIDED_SLICE_V1_2){
.opType = ANEURALNETWORKS_STRIDED_SLICE,
- .supportedDataTypes = {Type::TENSOR_FLOAT16},
+ .supportedDataTypes = {TestOperandType::TENSOR_FLOAT16},
.supportedRanks = {1, 2, 3, 4},
.version = HalVersion::V1_2,
- .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32),
- PARAMETER_NONE(Type::TENSOR_INT32), PARAMETER_NONE(Type::TENSOR_INT32),
- PARAMETER_NONE(Type::INT32), PARAMETER_NONE(Type::INT32),
- PARAMETER_NONE(Type::INT32)},
+ .inputs = {INPUT_DEFAULT, PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::TENSOR_INT32),
+ PARAMETER_NONE(TestOperandType::INT32), PARAMETER_NONE(TestOperandType::INT32),
+ PARAMETER_NONE(TestOperandType::INT32)},
.outputs = {OUTPUT_DEFAULT},
.constructor = stridedSliceConstructor,
.finalizer = stridedSliceFinalizer};