author     Mika Raento <mikie@google.com>  2018-04-26 08:12:25 +0000
committer  Android (Google) Code Review <android-gerrit@google.com>  2018-04-26 08:12:25 +0000
commit     c8081626b9a9283425430b3775948a64ee305b08 (patch)
tree       5bb153e5368d0ccb6dfd9eaea12856b74e344fe4 /nn
parent     560950ab0ee18b7a41ecce400f985eb015a4e46f (diff)
parent     3340f8bea2913ee5f4d76193c11f1068b6582770 (diff)
download   ml-c8081626b9a9283425430b3775948a64ee305b08.tar.gz
Merge "Script syncing of types.hal from NeuralNetworks.h" into pi-dev
Diffstat (limited to 'nn')
-rw-r--r--  nn/runtime/include/NeuralNetworks.h  |  50
-rwxr-xr-x  nn/tools/sync_enums_to_hal.py        | 251
2 files changed, 274 insertions, 27 deletions
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index b1954b797..0fc927050 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -61,27 +61,23 @@ __BEGIN_DECLS
* and {@link ANEURALNETWORKS_INT32}.
*/
typedef enum {
- /** The following entries are used to declare scalars. */
-
/** A 32 bit floating point scalar value. */
- ANEURALNETWORKS_FLOAT32 = 0,
+ ANEURALNETWORKS_FLOAT32 = 0,
/** A signed 32 bit integer scalar value. */
- ANEURALNETWORKS_INT32 = 1,
+ ANEURALNETWORKS_INT32 = 1,
/** An unsigned 32 bit integer scalar value. */
- ANEURALNETWORKS_UINT32 = 2,
-
- /** The following entries are used to declare tensors. */
+ ANEURALNETWORKS_UINT32 = 2,
/** A tensor of 32 bit floating point values. */
- ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
+ ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
/** A tensor of 32 bit integer values. */
- ANEURALNETWORKS_TENSOR_INT32 = 4,
+ ANEURALNETWORKS_TENSOR_INT32 = 4,
/** A tensor of 8 bit integers that represent real numbers.
*
- * Attached to this tensor are two numbers that can be used to convert
- * the 8 bit integer to the real value and vice versa. These two numbers are:
+ * Attached to this tensor are two numbers that can be used to convert the
+ * 8 bit integer to the real value and vice versa. These two numbers are:
* - scale: a 32 bit floating point value greater than zero.
- * - zeroPoint: an 32 bit integer, in range [0, 255].
+ * - zeroPoint: a 32 bit integer, in range [0, 255].
*
* The formula is:
* real_value = (integer_value - zeroPoint) * scale.
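
As an aside on the quantization scheme documented above, here is a minimal Python sketch of the conversion in both directions; the scale and zeroPoint values are assumptions chosen for illustration, not values taken from this change:

    scale = 0.5        # must be a 32 bit float greater than zero
    zero_point = 128   # integer in the range [0, 255]

    def dequantize(q):
        # real_value = (integer_value - zeroPoint) * scale
        return (q - zero_point) * scale

    def quantize(real_value):
        # inverse mapping, clamped back into the 8 bit range [0, 255]
        q = round(real_value / scale) + zero_point
        return min(255, max(0, q))

    assert quantize(-3.0) == 122 and dequantize(122) == -3.0
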
@@ -413,12 +409,12 @@ typedef enum {
* to create the output tensor.
*
* For example, if Values has shape of [40, 200, 300] and
- * Lookups has shape of [3], we would expect all three values
- * found in Lookups to be between 0 and 39. The resulting tensor will
+ * Lookups has shape of [3], all three values found in Lookups are
+ * expected to be between 0 and 39. The resulting tensor must
* have shape of [3, 200, 300].
*
- * If a value in Lookups is out of bounds, the operation will fail
- * and an error will be reported.
+ * If a value in Lookups is out of bounds, the operation must fail
+ * and an error must be reported.
*
* Inputs:
* * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32} type.
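
The lookup behaviour described in this hunk amounts to gathering whole sub-tensors along the first dimension. A rough NumPy sketch, using the shapes from the comment above (the data itself is made up for illustration):

    import numpy as np

    values = np.zeros((40, 200, 300), dtype=np.float32)   # made-up data
    lookups = np.array([7, 0, 39], dtype=np.int32)

    # every index must be within [0, values.shape[0]); otherwise the operation fails
    assert lookups.min() >= 0 and lookups.max() < values.shape[0]

    output = values[lookups]             # gather sub-tensors along the first dimension
    assert output.shape == (3, 200, 300)
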
@@ -499,17 +495,17 @@ typedef enum {
* same index as the Maps entry that matches the value in Lookups.
*
* For a hit, the corresponding sub-tensor of Values is included
- * in the Output tensor. For a miss, the corresponding sub-tensor in
- * Output will have zero values.
+ * in the Output tensor. For a miss, the corresponding sub-tensor in
+ * Output must have zero values.
*
* For example, if Values has shape of [40, 200, 300],
* Keys should have a shape of [40]. If Lookups tensor has shape
- * of [3], we're concatenating three slices, so the resulting tensor
- * will have the shape of [3, 200, 300]. If the first entry in
- * Lookups has the value 123456, we'll look for that value in Keys tensor.
- * If the sixth entry of Keys contains 123456, we'll select the sixth
- * slice of Values. If no entry in Keys has 123456, a slice of zeroes
- * will be concatenated.
+ * of [3], three slices are being concatenated, so the resulting tensor
+ * must have the shape of [3, 200, 300]. If the first entry in Lookups
+ * has the value 123456, that value must be located in Keys tensor.
+ * If the sixth entry of Keys contains 123456, the sixth slice of Values
+ * must be selected. If no entry in Keys has 123456, a slice of zeroes
+ * must be concatenated.
*
* Inputs:
* * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [ k ].
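
A rough Python sketch of the hit/miss behaviour described above; the keys and lookup values are assumptions for illustration, with 123456 placed at the sixth position of Keys as in the comment:

    import numpy as np

    values = np.ones((40, 200, 300), dtype=np.float32)       # made-up data
    keys = np.arange(123451, 123451 + 40, dtype=np.int32)    # keys[5] == 123456
    lookups = np.array([123456, -1, 123460], dtype=np.int32)

    slices = []
    for key in lookups:
        matches = np.where(keys == key)[0]
        if matches.size:                                  # hit: copy the matching slice
            slices.append(values[matches[0]])
        else:                                             # miss: a slice of zeroes
            slices.append(np.zeros(values.shape[1:], dtype=values.dtype))

    output = np.stack(slices)
    assert output.shape == (3, 200, 300)
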
@@ -1016,7 +1012,7 @@ typedef enum {
/** Resizes images to given size using the bilinear interpretation.
*
- * Resized images will be distorted if their output aspect ratio is not the same as
+ * Resized images must be distorted if their output aspect ratio is not the same as
* input aspect ratio. The corner pixels of output may not be the same as
* corner pixels of input.
*
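
To make the aspect-ratio remark above concrete, a tiny sketch (the sizes are assumptions for illustration): the two axes are scaled independently, so unequal scale factors distort the image:

    in_h, in_w = 4, 8      # input height and width
    out_h, out_w = 3, 3    # requested output size
    scale_h, scale_w = out_h / in_h, out_w / in_w
    print(scale_h, scale_w)   # 0.75 vs 0.375: the output is squashed horizontally
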
@@ -1388,7 +1384,7 @@ typedef enum {
* 0: An n-D tensor, the tensor to be squeezed.
* 1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified
* only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
- * The dimension index starts at 0. An error will be reported if squeezing a dimension that
+ * The dimension index starts at 0. An error must be reported if squeezing a dimension that
* is not 1.
*
* Outputs:
@@ -1425,7 +1421,7 @@ typedef enum {
* the fullest possible range in that dimension is used instead.
* 6: An INT32 value, shrink_axis_mask. An int32 mask. If the ith bit of shrink_axis_mask is
* set, it implies that the ith specification shrinks the dimensionality by 1. A slice of
- * size 1 starting from begin[i] in the dimension will be preserved.
+ * size 1 starting from begin[i] in the dimension must be preserved.
*
* Outputs:
* 0: A tensor of the same type as input0.
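
The SQUEEZE and shrink_axis_mask semantics touched in the last two hunks can be sketched with NumPy; the shapes are assumptions for illustration:

    import numpy as np

    x = np.zeros((1, 5, 1, 3), dtype=np.float32)

    # SQUEEZE with no dimensions given removes every dimension of size 1 ...
    assert np.squeeze(x).shape == (5, 3)
    # ... and with an explicit list removes only the listed size-1 dimensions
    assert np.squeeze(x, axis=(2,)).shape == (1, 5, 3)

    # squeezing a dimension that is not 1 is an error
    try:
        np.squeeze(x, axis=(1,))
    except ValueError:
        pass

    # shrink_axis_mask bit i: keep a slice of size 1 starting at begin[i]
    # and drop that dimension, reducing the dimensionality by 1
    y = np.arange(24).reshape(2, 3, 4)
    shrunk = np.squeeze(y[1:2, :, :], axis=0)
    assert shrunk.shape == (3, 4)
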
diff --git a/nn/tools/sync_enums_to_hal.py b/nn/tools/sync_enums_to_hal.py
new file mode 100755
index 000000000..f73d95947
--- /dev/null
+++ b/nn/tools/sync_enums_to_hal.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python3
+""" Synchronizes enums and their comments from the NeuralNetworks.h to types.hal
+
+Workflow:
+ - Don't try to make other changes to types.hal in the same branch, as this
+ will check out and overwrite files
+ - Edit NeuralNetworks.h
+ - run sync_enums_to_hal.py
+ - can be run from anywhere, but ANDROID_BUILD_TOP must be set
+ - this resets 1.(0|1)/types.hal to last commit (so you can run
+ the script multiple times with changes to it in-between), and
+ - overwrites types.hal in-place
+ - Check the output (git diff)
+ - Recreate hashes
+ - commit and upload for review
+
+Note:
+This is somewhat brittle in terms of ordering and formatting of the
+relevant files. It's the author's opinion that it's not worth spending a lot of
+time upfront on better mechanisms; instead, improve the script when needed.
+For example, currently Operations have differences between 1.0 and 1.1,
+but Operands do not, so the script is explicit rather than generic.
+
+There are asserts in the code to make sure the expectations on the ordering and
+formatting of the headers are met, so this should fail rather than produce
+completely unexpected output.
+
+The alternative would be to add explicit section markers to the files.
+
+"""
+
+import os
+import re
+import subprocess
+
+class HeaderReader(object):
+ """ Simple base class facilitates reading a file into sections and writing it
+ back out
+ """
+ def __init__(self):
+ self.sections = []
+ self.current = -1
+ self.next_section()
+
+ def put_back(self, no_of_lines=1):
+ assert not self.sections[self.current]
+ for i in range(0, no_of_lines):
+ line = self.sections[self.current - 1].pop()
+ self.sections[self.current].insert(0, line)
+
+ def next_section(self):
+ self.current = self.current + 1
+ self.sections.append([])
+
+ def get_contents(self):
+ return "".join([ "".join(s) for s in self.sections])
+
+ def get_section(self, which):
+ return "".join(self.sections[which])
+
+ def handle_line(self, line):
+ assert False
+
+ def read(self, filename):
+ assert self.current == 0
+ self.filename = filename
+ with open(filename) as f:
+ lines = f.readlines()
+ for line in lines:
+ self.sections[self.current].append(line)
+ if self.current == self.REST:
+ continue
+ self.handle_line(line)
+ assert self.current == self.REST
+
+ def write(self):
+ with open(self.filename, "w") as f:
+ f.write(self.get_contents())
+
+class Types10Reader(HeaderReader):
+ """ Reader for 1.0 types.hal
+
+ The structure of the file is:
+ - preamble
+ - enum OperandType ... {
+ < this becomes the OPERAND section >
+ OEM operands
+ };
+ - comments
+ - enum OperationType ... {
+ < this becomes the OPERATION section >
+          OEM operations
+ };
+ - rest
+ """
+ BEFORE_OPERAND = 0
+ OPERAND = 1
+ BEFORE_OPERATION = 2
+ OPERATION = 3
+ REST = 4
+
+ def __init__(self):
+ super(Types10Reader, self).__init__()
+ self.read("hardware/interfaces/neuralnetworks/1.0/types.hal")
+
+ def handle_line(self, line):
+ if "enum OperandType" in line:
+ assert self.current == self.BEFORE_OPERAND
+ self.next_section()
+ elif "enum OperationType" in line:
+ assert self.current == self.BEFORE_OPERATION
+ self.next_section()
+ elif "OEM" in line and self.current == self.OPERAND:
+ self.next_section()
+ self.put_back(2)
+ elif "OEM specific" in line and self.current == self.OPERATION:
+ self.next_section()
+ self.put_back(2)
+
+class Types11Reader(HeaderReader):
+ """ Reader for 1.1 types.hal
+
+ The structure of the file is:
+ - preamble
+ - enum OperationType ... {
+ < this becomes the OPERATION section >
+ };
+ - rest
+ """
+
+ BEFORE_OPERATION = 0
+ OPERATION = 1
+ REST = 2
+
+ def __init__(self):
+ super(Types11Reader, self).__init__()
+ self.read("hardware/interfaces/neuralnetworks/1.1/types.hal")
+
+ def handle_line(self, line):
+ if "enum OperationType" in line:
+ assert self.current == self.BEFORE_OPERATION
+ self.next_section()
+ # there is more content after the enums we are interested in so
+ # it cannot be the last line, can match with \n
+ elif line == "};\n":
+ self.next_section()
+ self.put_back()
+
+class NeuralNetworksReader(HeaderReader):
+ """ Reader for NeuralNetworks.h
+
+ The structure of the file is:
+ - preamble
+ - typedef enum {
+ < this becomes the OPERAND section >
+ } OperandCode;
+ - comments
+ - typedef enum {
+ < this becomes the OPERATION_V10 section >
+ // TODO: change to __ANDROID_API__ >= __ANDROID_API_P__ once available.
+ #if __ANDROID_API__ > __ANDROID_API_O_MR1__
+ < this becomes the OPERATION_V11 section >
+ #endif
+ };
+ - rest
+ """
+
+ BEFORE_OPERAND = 0
+ OPERAND = 1
+ BEFORE_OPERATION = 2
+ OPERATION_V10 = 3
+ OPERATION_V11 = 4
+ REST = 5
+
+ def __init__(self):
+ super(NeuralNetworksReader, self).__init__()
+ self.read("frameworks/ml/nn/runtime/include/NeuralNetworks.h")
+
+ def handle_line(self, line):
+ if line == "typedef enum {\n":
+ self.next_section()
+ elif line == "} OperandCode;\n":
+ assert self.current == self.OPERAND
+ self.next_section()
+ self.put_back()
+ elif self.current == self.OPERATION_V10 and "#if __ANDROID_API__ >" in line:
+ self.next_section()
+ # Get rid of the API divider altogether
+ self.put_back(2)
+ self.sections[self.current] = []
+ elif line == "} OperationCode;\n":
+ assert self.current == self.OPERATION_V11
+ self.next_section()
+ self.put_back()
+ # Get rid of API divider #endif
+ self.sections[self.OPERATION_V11].pop()
+
+
+if __name__ == '__main__':
+ # Reset
+ assert os.environ["ANDROID_BUILD_TOP"]
+ os.chdir(os.environ["ANDROID_BUILD_TOP"])
+ subprocess.run(
+ "cd hardware/interfaces/neuralnetworks && git checkout */types.hal",
+ shell=True)
+
+ # Read existing contents
+ types10 = Types10Reader()
+ types11 = Types11Reader()
+ nn = NeuralNetworksReader()
+
+ # Rewrite from header syntax to HAL and replace types.hal contents
+ operand = []
+ for line in nn.sections[nn.OPERAND]:
+ line = line.replace("ANEURALNETWORKS_", "")
+ operand.append(line)
+ types10.sections[types10.OPERAND] = operand
+ def rewrite_operation(from_nn):
+ hal = []
+ for line in from_nn:
+ if "TODO" in line:
+ continue
+
+ # Match multiline comment style
+ if re.match("^ */\*\* \w.*[^/]$", line):
+ hal.append(" /**\n")
+ line = line.replace("/** ", " * ")
+ # Match naming changes in HAL vs framework
+ line = line.replace("@link ANEURALNETWORKS_", "@link OperandType::")
+ line = line.replace("ANEURALNETWORKS_", "")
+ line = line.replace("FuseCode", "FusedActivationFunc")
+ # PaddingCode is not part of HAL, rewrite
+ line = line.replace("{@link PaddingCode} values",
+ "following values: {0 (NONE), 1 (SAME), 2 (VALID)}")
+ hal.append(line)
+ return hal
+ types10.sections[types10.OPERATION] = rewrite_operation(nn.sections[nn.OPERATION_V10])
+ types11.sections[types11.OPERATION] = rewrite_operation(nn.sections[nn.OPERATION_V11])
+
+ # Write synced contents
+ types10.write()
+ types11.write()
+
+ print("")
+ print("The files")
+ print(" " + types10.filename + " and")
+ print(" " + types11.filename)
+ print("have been rewritten")
+ print("")
+ print("Check that the change matches your expectations and regenerate the hashes")
+ print("")