path: root/nn/runtime/include/NeuralNetworks.h
author     Michael Butler <butlermichael@google.com>  2018-02-13 17:25:57 -0800
committer  Michael Butler <butlermichael@google.com>  2018-02-20 19:13:06 -0800
commit     084401d6215dca122999261c5ac3718ebf61b29e (patch)
tree       fd1afdd052d4dcecf5103bd7e90333981bad2502 /nn/runtime/include/NeuralNetworks.h
parent     e78b89d46b7e534b749ace863886571a840e83e2 (diff)
download   ml-084401d6215dca122999261c5ac3718ebf61b29e.tar.gz
NNAPI narrow evaluation for P -- runtime
We have determined that for Android P it is sufficient to have a mechanism
for a developer to specify, on a per-model basis, that it is acceptable for
FLOAT32 operations to be carried out as if they were FLOAT16 operations.
This CL manages the versioning differences between 1.0 and 1.1.

Bug: 63911257
Test: mm
Test: NeuralNetworksTest
Test: VtsHalNeuralnetworksV1_0TargetTest
Change-Id: If6f31536eedc92c4795056fdf3ff8818db1bc988
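A usage sketch (not part of this CL) of how a developer might opt a model into the relaxed FP16 evaluation introduced here. The operand/operation setup is elided, the helper name build_relaxed_model is hypothetical, and the include path assumes the NDK layout:

/* Build a model that permits FLOAT32 operations to be evaluated with
 * FP16 range/precision. Sketch only; real code would add operands and
 * operations and identify inputs/outputs before finishing the model. */
#include <android/NeuralNetworks.h>
#include <stdbool.h>

int build_relaxed_model(ANeuralNetworksModel** outModel) {
    ANeuralNetworksModel* model = NULL;
    int status = ANeuralNetworksModel_create(&model);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;

    /* ... add operands and operations, identify inputs and outputs ... */

    /* Opt in to relaxed FP16 computation. This must happen before
     * ANeuralNetworksModel_finish; modifying a finished model returns
     * an error. */
    status = ANeuralNetworksModel_relaxComputationFloat32toFloat16(model, true);
    if (status == ANEURALNETWORKS_NO_ERROR) {
        status = ANeuralNetworksModel_finish(model);
    }
    if (status != ANEURALNETWORKS_NO_ERROR) {
        ANeuralNetworksModel_free(model);
        return status;
    }
    *outModel = model;
    return ANEURALNETWORKS_NO_ERROR;
}
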
Diffstat (limited to 'nn/runtime/include/NeuralNetworks.h')
-rw-r--r--  nn/runtime/include/NeuralNetworks.h  22
1 file changed, 22 insertions, 0 deletions
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 00a60d309..008c1ab50 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -1953,6 +1953,28 @@ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, u
const uint32_t* outputs);
/**
+ * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
+ * calculated with range and/or precision as low as that of the IEEE 754 16-bit
+ * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * must be calculated using at least the range and precision of the IEEE 754
+ * 32-bit floating-point format.
+ *
+ * @param model The model to be modified.
+ * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
+ *              calculated with range and/or precision as low as that of the
+ *              IEEE 754 16-bit floating-point format. 'false' indicates
+ *              {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
+ *              at least the range and precision of the IEEE 754 32-bit
+ *              floating-point format.
+ *
+ * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
+ * called will return an error.
+ *
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ */
+int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow);
+
+/**
* Create a {@link ANeuralNetworksCompilation} to compile the given model.
*
* <p>This only creates the object. Compilation is only performed once