summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Michael Butler <butlermichael@google.com>  2020-11-01 23:04:51 -0800
committer Michael Butler <butlermichael@google.com>  2020-11-12 22:31:18 +0000
commit8250ec9e013fa53237f385deb9e6fdc127f79c93 (patch)
tree4960a463532cf3562bcde9be58472459b54c0029
parentf1c452cda6533807bcab2337cb13d5184405505c (diff)
downloadml-8250ec9e013fa53237f385deb9e6fdc127f79c93.tar.gz
Remove HAL types from NNAPI fuzz test harness
Switches the implementation from using 1.3 HAL types to using canonical types.

Bug: N/A
Test: mma
Test: libneuralnetworks_fuzzer
Change-Id: Ia9b76f324ddb5559cb2834fc13b3a09a62d3d0bd
-rw-r--r--nn/runtime/test/android_fuzzing/Android.bp2
-rw-r--r--nn/runtime/test/android_fuzzing/Converter.cpp11
-rw-r--r--nn/runtime/test/android_fuzzing/FuzzHarness.cpp9
3 files changed, 10 insertions, 12 deletions
diff --git a/nn/runtime/test/android_fuzzing/Android.bp b/nn/runtime/test/android_fuzzing/Android.bp
index 02a32256f..89ce1ea78 100644
--- a/nn/runtime/test/android_fuzzing/Android.bp
+++ b/nn/runtime/test/android_fuzzing/Android.bp
@@ -40,7 +40,6 @@ cc_library_static {
"libprotobuf-cpp-full",
],
static_libs: [
- "android.hardware.neuralnetworks@1.3",
"libneuralnetworks_common",
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_static",
@@ -48,6 +47,7 @@ cc_library_static {
whole_static_libs: [
"libneuralnetworks_fuzzer_proto",
"libprotobuf-mutator",
+ "neuralnetworks_types",
],
}
diff --git a/nn/runtime/test/android_fuzzing/Converter.cpp b/nn/runtime/test/android_fuzzing/Converter.cpp
index c2fc354fa..4733b3b55 100644
--- a/nn/runtime/test/android_fuzzing/Converter.cpp
+++ b/nn/runtime/test/android_fuzzing/Converter.cpp
@@ -17,14 +17,13 @@
#include "Converter.h"
#include <android-base/logging.h>
+#include <nnapi/TypeUtils.h>
#include <algorithm>
#include <random>
#include <utility>
#include <vector>
-#include "Utils.h"
-
namespace android::nn::fuzz {
namespace {
@@ -78,11 +77,11 @@ TestOperand convert(const android_nn_fuzz::Operand& operand) {
auto channelQuant = convert(operand.channel_quant());
const bool isIgnored = false;
- const auto halType = static_cast<V1_3::OperandType>(type);
- const bool willOverflow = nonExtensionOperandSizeOfDataOverflowsUInt32(halType, dimensions);
+ const auto opType = static_cast<OperandType>(type);
+ const size_t size = getNonExtensionSize(opType, dimensions).value_or(0);
const bool makeEmpty = (lifetime == TestOperandLifeTime::NO_VALUE ||
- lifetime == TestOperandLifeTime::TEMPORARY_VARIABLE || willOverflow);
- const size_t bufferSize = makeEmpty ? 0 : nonExtensionOperandSizeOfData(halType, dimensions);
+ lifetime == TestOperandLifeTime::TEMPORARY_VARIABLE);
+ const size_t bufferSize = makeEmpty ? 0 : size;
TestBuffer data = convert(bufferSize, operand.data());
return {.type = type,
diff --git a/nn/runtime/test/android_fuzzing/FuzzHarness.cpp b/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
index 76c34a75a..17dac3ab9 100644
--- a/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
+++ b/nn/runtime/test/android_fuzzing/FuzzHarness.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/TypeUtils.h>
#include <src/libfuzzer/libfuzzer_macro.h>
#include <algorithm>
@@ -22,22 +22,21 @@
#include "Converter.h"
#include "Model.pb.h"
#include "TestHarness.h"
-#include "Utils.h"
// Fuzz test logic. This function will either run to completion and return, or crash.
extern void nnapiFuzzTest(const ::test_helper::TestModel& testModel);
namespace {
-using ::android::nn::nonExtensionOperandSizeOfDataOverflowsUInt32;
+using ::android::nn::getNonExtensionSize;
+using ::android::nn::OperandType;
using ::android::nn::fuzz::convertToTestModel;
-using ::android::nn::V1_3::OperandType;
using ::test_helper::TestModel;
using ::test_helper::TestOperand;
bool operandOverflows(const TestOperand& operand) {
const auto operandType = static_cast<OperandType>(operand.type);
- return nonExtensionOperandSizeOfDataOverflowsUInt32(operandType, operand.dimensions);
+ return getNonExtensionSize(operandType, operand.dimensions).has_value();
}
bool shouldSkip(const TestModel& model) {