authorMichael Butler <butlermichael@google.com>2018-04-26 18:35:56 -0700
committerMichael Butler <butlermichael@google.com>2018-04-27 14:42:22 -0700
commitf1a4694344c8acf2b157724ddf7df6ce2d1ac7bb (patch)
treeda10233b42a226fae4dc6aeda656fa4ef8a84d37
parenteb336294144d6e32ee7e5284cd2c51da32df7ef8 (diff)
downloadml-f1a4694344c8acf2b157724ddf7df6ce2d1ac7bb.tar.gz
Sync updated NNAPI doc to HAL -- script
Additionally reformatted text to fit within the 80-character line limit
for HIDL HALs, and removed "of each addition" from "Specifies the
activation to invoke on the result of each addition" because the phrase
was included regardless of the operation. For example, it appeared in
CONV_2D, where (although that operation does include a bias add) it was
confusing and unclear.

Bug: 72650109
Test: None, only documentation change
Change-Id: I779654aa30e1dec1f376ca173257e839fc68dc64
-rw-r--r--nn/runtime/include/NeuralNetworks.h1017
-rwxr-xr-xnn/tools/sync_enums_to_hal.py1
2 files changed, 578 insertions, 440 deletions
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 2ec72c904..e0d09ae24 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -72,7 +72,8 @@ typedef enum {
ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
/** A tensor of 32 bit integer values. */
ANEURALNETWORKS_TENSOR_INT32 = 4,
- /** A tensor of 8 bit integers that represent real numbers.
+ /**
+ * A tensor of 8 bit integers that represent real numbers.
*
* Attached to this tensor are two numbers that can be used to convert the
* 8 bit integer to the real value and vice versa. These two numbers are:
@@ -91,18 +92,20 @@ typedef enum {
* The type of operations that can be added to a model.
*/
typedef enum {
- /** Adds two tensors, element-wise.
+ /**
+ * Adds two tensors, element-wise.
*
- * Takes two input tensors of identical {@link OperandCode} and compatible dimensions.
- * The output is the sum of both input tensors, optionally modified by an activation
- * function.
+ * Takes two input tensors of identical {@link OperandCode} and compatible
+ * dimensions. The output is the sum of both input tensors, optionally
+ * modified by an activation function.
*
* Two dimensions are compatible when:
* 1. they are equal, or
* 2. one of them is 1
*
- * The size of the output is the maximum size along each dimension of the input operands.
- * It starts with the trailing dimensions, and works its way forward.
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its
+ * way forward.
*
* Example:
*
@@ -118,18 +121,22 @@ typedef enum {
*
* Inputs:
* * 0: A tensor.
- * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions as input0.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the {@link FuseCode}
- * values. Specifies the activation to invoke on the result of each addition.
+ * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
* * 0: The sum, a tensor of the same {@link OperandCode} as input0.
*/
ANEURALNETWORKS_ADD = 0,
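For illustration, a minimal sketch of wiring ADD into a model through the
public NNAPI C API, following the input list above. This is not part of the
patch: build_add is a hypothetical helper, the shapes and the fused RELU are
example choices, and error checking is omitted.

#include <android/NeuralNetworks.h>

/* Builds out = RELU(a + b); inputs 0 and 1 are the tensors, input 2 is
 * the FuseCode scalar, per the documentation above. */
int build_add(ANeuralNetworksModel* model) {
    uint32_t dimsA[] = {2, 2};  /* input0 */
    uint32_t dimsB[] = {2};     /* input1; compatible via broadcasting */
    ANeuralNetworksOperandType a = {ANEURALNETWORKS_TENSOR_FLOAT32, 2, dimsA, 0.0f, 0};
    ANeuralNetworksOperandType b = {ANEURALNETWORKS_TENSOR_FLOAT32, 1, dimsB, 0.0f, 0};
    ANeuralNetworksOperandType act = {ANEURALNETWORKS_INT32, 0, NULL, 0.0f, 0};
    ANeuralNetworksOperandType out = a;  /* max size along each dimension */

    ANeuralNetworksModel_addOperand(model, &a);    /* operand index 0 */
    ANeuralNetworksModel_addOperand(model, &b);    /* operand index 1 */
    ANeuralNetworksModel_addOperand(model, &act);  /* operand index 2 */
    ANeuralNetworksModel_addOperand(model, &out);  /* operand index 3 */

    int32_t fuse = ANEURALNETWORKS_FUSED_RELU;
    ANeuralNetworksModel_setOperandValue(model, 2, &fuse, sizeof fuse);

    uint32_t in[] = {0, 1, 2}, outIdx[] = {3};
    return ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
                                             3, in, 1, outIdx);
}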
- /** Performs a 2-D average pooling operation.
+ /**
+ * Performs a 2-D average pooling operation.
*
- * The output dimensions are functions of the filter dimensions, stride, and padding.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
*
* The values in the output tensor are computed as:
*
@@ -140,54 +147,63 @@ typedef enum {
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels)
- * data layout.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the left,
- * in the ‘width’ dimension.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the right,
- * in the ‘width’ dimension.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the top,
- * in the ‘height’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the bottom,
- * in the ‘height’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter width.
- * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter height.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * height.
* * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Inputs (implicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit padding scheme,
- * has to be one of the {@link PaddingCode} values.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter width.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter height.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * height.
* * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
*/
ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
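The header says the output dimensions are functions of the filter
dimensions, stride, and padding without giving the relation; the sketch
below assumes the conventional formula for explicit padding (pooled_extent
is a hypothetical helper, not part of this header).

/* Conventional pooled output extent for explicit padding (assumed). */
static uint32_t pooled_extent(uint32_t in, uint32_t filter,
                              uint32_t pad_before, uint32_t pad_after,
                              uint32_t stride) {
    return (in + pad_before + pad_after - filter) / stride + 1;
}
/* Example: in = 32, filter = 2, pads = 0, stride = 2  ->  16. */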
- /** Concatenates the input tensors along the given dimension.
+ /**
+ * Concatenates the input tensors along the given dimension.
*
- * The input tensors must have identical {@link OperandCode} and the same dimensions
- * except the dimension along the concatenation axis.
+ * The input tensors must have identical {@link OperandCode} and the same
+ * dimensions except the dimension along the concatenation axis.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -196,23 +212,28 @@ typedef enum {
* Supported tensor rank: up to 4
*
* Inputs:
- * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm].
- * For inputs of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, all
- * input tensors must have the same scale and zeroPoint.
- * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the concatenation axis.
+ * * 0 ~ n-1: The list of n input tensors, of shape
+ * [D0, D1, ..., Daxis(i), ..., Dm]. For inputs of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, all input tensors
+ * must have the same scale and zeroPoint.
+ * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
+ * concatenation axis.
*
* Outputs:
- * * 0: The output, a tensor of the same {@link OperandCode} as the input tensors.
- * The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ * * 0: The output, a tensor of the same {@link OperandCode} as the input
+ * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
*/
ANEURALNETWORKS_CONCATENATION = 2,
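A sketch of the output-shape rule stated above: sizes add up along the
concatenation axis and every other dimension is unchanged. concat_shape is
a hypothetical helper; inputs are assumed pre-validated.

static void concat_shape(const uint32_t* const* shapes, uint32_t n,
                         uint32_t rank, uint32_t axis, uint32_t* out) {
    for (uint32_t d = 0; d < rank; ++d) out[d] = shapes[0][d];
    out[axis] = 0;  /* sum(Daxis(i)) over all n inputs */
    for (uint32_t i = 0; i < n; ++i) out[axis] += shapes[i][axis];
}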
- /** Performs an 2-D convolution operation.
+ /**
+ * Performs a 2-D convolution operation.
*
- * The CONV_2D op sweeps a 2-D filter that can mix channels together over a batch of
- * images, applying the filter to each window of each image of the appropriate size.
+ * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
+ * batch of images, applying the filter to each window of each image of the
+ * appropriate size.
*
- * The output dimensions are functions of the filter dimensions, stride, and padding.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
*
* The values in the output tensor are computed as:
*
@@ -232,68 +253,77 @@ typedef enum {
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
- * specifying the filter.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
- * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
- * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the left,
- * in the ‘width’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the right,
- * in the ‘width’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the top,
- * in the ‘height’ dimension.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the bottom,
- * in the ‘height’ dimension.
- * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
+ * should also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input
+ * tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
+ * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale == input_scale * filter_scale.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
* * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Inputs (implicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
- * specifying the filter.
- * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
- * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
- * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
+ * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
* bias_scale == input_scale * filter_scale.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit padding scheme,
- * has to be one of the {@link PaddingCode} values.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
* * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
- * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied: output_scale > input_scale * filter_scale.
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out]. For output tensor of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition
+ * must be satisfied: output_scale > input_scale * filter_scale.
*/
ANEURALNETWORKS_CONV_2D = 3,
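A sketch of the quantized-path constraints stated above: an INT32 bias with
zeroPoint 0, bias_scale == input_scale * filter_scale, and
output_scale > input_scale * filter_scale. conv2d_quant_ok is a
hypothetical validator, assuming <android/NeuralNetworks.h> is included.

#include <stdbool.h>

static bool conv2d_quant_ok(const ANeuralNetworksOperandType* input,
                            const ANeuralNetworksOperandType* filter,
                            const ANeuralNetworksOperandType* bias,
                            const ANeuralNetworksOperandType* output) {
    if (input->type != ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)
        return true;  /* constraints apply to the quantized path only */
    return bias->type == ANEURALNETWORKS_TENSOR_INT32 &&
           bias->zeroPoint == 0 &&
           bias->scale == input->scale * filter->scale &&  /* exact, per doc */
           output->scale > input->scale * filter->scale;
}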
- /** Performs a depthwise 2-D convolution operation.
+ /**
+ * Performs a depthwise 2-D convolution operation.
*
- * Given an input tensor of shape [batches, height, width, depth_in] and a filter
- * tensor of shape [1, filter_height, filter_width, depth_out] containing
- * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different
- * filter to each input channel (expanding from 1 channel to channel_multiplier channels
- * for each), then concatenates the results together.
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [1, filter_height, filter_width, depth_out]
+ * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
+ * applies a different filter to each input channel (expanding from 1
+ * channel to channel_multiplier channels for each), then concatenates the
+ * results together.
*
* The output has depth_out = depth_in * depth_multiplier channels.
- * The output dimensions are functions of the filter dimensions, stride, and padding.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
*
* The values in the output tensor are computed as:
*
@@ -312,72 +342,80 @@ typedef enum {
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
- * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
- * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
- * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
+ * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
* bias_scale == input_scale * filter_scale.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the left,
- * in the ‘width’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the right,
- * in the ‘width’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the top,
- * in the ‘height’ dimension.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the bottom,
- * in the ‘height’ dimension.
- * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise multiplier.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
+ * multiplier.
* * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Inputs (implicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
- * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
- * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
- * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
+ * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
* bias_scale == input_scale * filter_scale.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit padding scheme,
- * has to be one of the {@link PaddingCode} values.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise multiplier.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
+ * multiplier.
* * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
- * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied: output_scale > input_scale * filter_scale.
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out]. For output tensor of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition
+ * must be satisfied: output_scale > input_scale * filter_scale.
*/
ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
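The depth relation above in code form: each input channel expands to
`multiplier` output channels (depthwise_out_depth is a hypothetical helper).

static uint32_t depthwise_out_depth(uint32_t depth_in, uint32_t multiplier) {
    return depth_in * multiplier;  /* depth_out */
}
/* e.g. depth_in = 8, multiplier = 3 -> filter [1, fh, fw, 24]. */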
- /** Rearranges data from depth into blocks of spatial data.
+ /**
+ * Rearranges data from depth into blocks of spatial data.
*
- * More specifically, this op outputs a copy of the input tensor where values from
- * the depth dimension are moved in spatial blocks to the height and width dimensions.
- * The value block_size indicates the input block size and how the data is moved.
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the depth dimension are moved in spatial blocks to the height
+ * and width dimensions. The value block_size indicates the input block size
+ * and how the data is moved.
*
- * Chunks of data of size block_size * block_size from depth are rearranged into
- * non-overlapping blocks of size block_size x block_size.
+ * Chunks of data of size block_size * block_size from depth are rearranged
+ * into non-overlapping blocks of size block_size x block_size.
*
- * The width of the output tensor is input_depth * block_size, whereas the height is
- * input_height * block_size.
- * The depth of the input tensor must be divisible by block_size * block_size
+ * The width of the output tensor is input_width * block_size, whereas
+ * the height is input_height * block_size. The depth of the input tensor
+ * must be divisible by block_size * block_size.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -386,17 +424,20 @@ typedef enum {
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. block_size
- * must be >=1 and block_size * block_size must be a divisor of the input depth.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size * block_size must be a divisor
+ * of the input depth.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
- * depth/(block_size*block_size)].
+ * * 0: The output 4-D tensor, of shape [batch, height*block_size,
+ * width*block_size, depth/(block_size*block_size)].
*/
ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
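A shape helper mirroring the output spec above (hypothetical; assumes NHWC
input with in[3] divisible by bs * bs).

static void depth_to_space_shape(const uint32_t in[4], uint32_t bs,
                                 uint32_t out[4]) {
    out[0] = in[0];              /* batch            */
    out[1] = in[1] * bs;         /* height*block_size */
    out[2] = in[2] * bs;         /* width*block_size  */
    out[3] = in[3] / (bs * bs);  /* depth/(bs*bs)     */
}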
- /** Dequantizes the input tensor.
+ /**
+ * Dequantizes the input tensor.
*
* The formula is:
*
@@ -416,7 +457,8 @@ typedef enum {
*/
ANEURALNETWORKS_DEQUANTIZE = 6,
- /** Looks up sub-tensors in the input tensor.
+ /**
+ * Looks up sub-tensors in the input tensor.
*
* This operator takes for input a tensor of values (Values) and
* a one-dimensional tensor of selection indices (Lookups).
@@ -448,7 +490,8 @@ typedef enum {
*/
ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
- /** Computes element-wise floor() on the input tensor.
+ /**
+ * Computes element-wise floor() on the input tensor.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -459,13 +502,14 @@ typedef enum {
* * 0: A tensor.
*
* Outputs:
- * * 0: The output tensor, of the same {@link OperandCode} and dimensions as the
- * input tensor.
+ * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
+ * the input tensor.
*/
ANEURALNETWORKS_FLOOR = 8,
- /** Denotes a fully (densely) connected layer, which connects all elements in the input
- * tensor with each element in the output tensor.
+ /**
+ * Denotes a fully (densely) connected layer, which connects all elements
+ * in the input tensor with each element in the output tensor.
*
* This layer implements the operation:
*
@@ -478,31 +522,36 @@ typedef enum {
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor of at least rank 2, specifying the input. If rank is greater than 2,
- * then it gets flattened to a 2-D Tensor. The (flattened) 2-D Tensor is reshaped
- * (if necessary) to [batch_size, input_size], where "input_size" corresponds to
- * the number of inputs to the layer, matching the second dimension of weights, and
- * "batch_size" is calculated by dividing the number of elements by "input_size".
- * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
- * "num_units" corresponds to the number of output nodes.
- * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
- * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
- * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * * 0: A tensor of at least rank 2, specifying the input. If rank is
+ * greater than 2, then it gets flattened to a 2-D Tensor. The
+ * (flattened) 2-D Tensor is reshaped (if necessary) to
+ * [batch_size, input_size], where "input_size" corresponds to the
+ * number of inputs to the layer, matching the second dimension of
+ * weights, and "batch_size" is calculated by dividing the number of
+ * elements by "input_size".
+ * * 1: A 2-D tensor, specifying the weights, of shape
+ * [num_units, input_size], where "num_units" corresponds to the number
+ * of output nodes.
+ * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
+ * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
+ * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
* bias_scale == input_scale * filter_scale.
* * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
- * * 0: The output tensor, of shape [batch_size, num_units].
- * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied: output_scale > input_scale * filter_scale.
+ * * 0: The output tensor, of shape [batch_size, num_units]. For output
+ * tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
+ * condition must be satisfied:
+ * output_scale > input_scale * filter_scale.
*/
ANEURALNETWORKS_FULLY_CONNECTED = 9,
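The flattening rule above, as a sketch: batch_size is the total element
count divided by input_size, the second dimension of the weights
(fc_batch_size is a hypothetical helper).

static uint32_t fc_batch_size(const uint32_t* dims, uint32_t rank,
                              uint32_t input_size) {
    uint32_t elements = 1;
    for (uint32_t i = 0; i < rank; ++i) elements *= dims[i];
    return elements / input_size;
}
/* e.g. a [2, 3, 4] input with input_size = 12 flattens to [2, 12]. */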
- /** Looks up sub-tensors in the input tensor using a key-value map.
+ /**
+ * Looks up sub-tensors in the input tensor using a key-value map.
*
* This operator takes for input a tensor of values (Values),
* a one-dimensional tensor of selection values (Lookups) and
@@ -529,24 +578,28 @@ typedef enum {
* must be concatenated.
*
* Inputs:
- * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [ k ].
- * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [ n ];
- * Keys and Values pair represent a map, i.e., the ith element
- * in Keys (Keys[i]) is the key to select the ith sub-tensor
- * in Values (Values[i]), where 0 <= i <= n-1.
- * Keys tensor *MUST* be sorted in ascending order.
- * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n.
+ * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
+ * shape [ k ].
+ * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
+ * [ n ]; the Keys and Values pair represents a map, i.e., the ith element
+ * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
+ * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
+ * ascending order.
+ * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
+ * must be n.
*
* Outputs:
* * 0: Output. A tensor with shape [ k …].
* * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
* hits (True) or not (False).
- * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f.
+ * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
+ * and scale 1.0f.
* A non-zero byte represents True, a hit. A zero indicates otherwise.
*/
ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
- /** Applies L2 normalization along the depth dimension.
+ /**
+ * Applies L2 normalization along the depth dimension.
*
* The values in the output tensor are computed as:
*
@@ -554,31 +607,35 @@ typedef enum {
* input[batch, row, col, channel] /
* sqrt(sum_{c} pow(input[batch, row, col, c], 2))
*
- * For input tensor with more dimensions, independently normalizes each 1-D slice along
- * dimension dim.
+ * For input tensor with more dimensions, independently normalizes each 1-D
+ * slice along dimension dim.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
*
- * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width,
- * and Channels).
+ * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples,
+ * Height, Width, and Channels).
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth].
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
*/
ANEURALNETWORKS_L2_NORMALIZATION = 11,
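A reference computation for one [batch, row, col] position, directly from
the formula above (illustrative only; l2norm_channels is hypothetical and
assumes the squared sum is non-zero).

#include <math.h>

static void l2norm_channels(const float* in, float* out, int depth) {
    float sum = 0.0f;
    for (int c = 0; c < depth; ++c) sum += in[c] * in[c];
    const float inv = 1.0f / sqrtf(sum);
    for (int c = 0; c < depth; ++c) out[c] = in[c] * inv;
}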
- /** Performs an 2-D L2 pooling operation.
+ /**
+ * Performs a 2-D L2 pooling operation.
*
- * The output dimensions are functions of the filter dimensions, stride, and padding.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
*
* The values in the output tensor are computed as:
*
* output[batch, row, col, channel] =
- * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
+ * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) /
+ * sum(1))
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -588,54 +645,64 @@ typedef enum {
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the left,
- * in the ‘width’ dimension.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the right,
- * in the ‘width’ dimension.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the top,
- * in the ‘height’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the bottom,
- * in the ‘height’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter width.
- * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter height.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * height.
* * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Inputs (implicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit padding scheme,
- * has to be one of the {@link PaddingCode} values.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter width.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter height.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * height.
* * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
*/
ANEURALNETWORKS_L2_POOL_2D = 12,
- /** Applies Local Response Normalization along the depth dimension.
+ /**
+ * Applies Local Response Normalization along the depth dimension.
*
- * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last
- * dimension), and each vector is normalized independently. Within a given vector,
- * each component is divided by the weighted, squared sum of inputs within depth_radius.
+ * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
+ * last dimension), and each vector is normalized independently. Within a
+ * given vector, each component is divided by the weighted, squared sum of
+ * inputs within depth_radius.
*
* The output is calculated using this formula:
*
- * sqr_sum[a, b, c, d] =
- * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)
+ * sqr_sum[a, b, c, d] = sum(
+ * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
* output = input / pow((bias + alpha * sqr_sum), beta)
*
* Supported tensor {@link OperandCode}:
@@ -644,19 +711,24 @@ typedef enum {
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of the
- * normalization window.
- * * 2: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the bias, must not be zero.
- * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scale factor, alpha.
- * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the exponent, beta.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
+ * the normalization window.
+ * * 2: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the bias, must
+ * not be zero.
+ * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scale
+ * factor, alpha.
+ * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the exponent,
+ * beta.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
*/
ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
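A reference sketch of the formula above for one depth vector. lrn_vector is
hypothetical; clamping the window at the tensor edges is an assumption, as
edge behavior is not spelled out in this header.

#include <math.h>

static void lrn_vector(const float* in, float* out, int depth, int radius,
                       float bias, float alpha, float beta) {
    for (int d = 0; d < depth; ++d) {
        const int lo = d - radius < 0 ? 0 : d - radius;
        const int hi = d + radius + 1 > depth ? depth : d + radius + 1;
        float sqr_sum = 0.0f;
        for (int c = lo; c < hi; ++c) sqr_sum += in[c] * in[c];
        out[d] = in[d] / powf(bias + alpha * sqr_sum, beta);
    }
}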
- /** Computes sigmoid activation on the input tensor element-wise.
+ /**
+ * Computes sigmoid activation on the input tensor element-wise.
*
* The output is calculated using this formula:
*
@@ -689,18 +761,19 @@ typedef enum {
*
* * 1: Input. Dim.size >= 1, no restriction on DataType.
* * 2: Weight. Optional. Dim.size == 1, DataType: Float.
- * If not set, each input element is considered to have the same weight of
- * 1.0.
+ * If not set, each input element is considered to have the same weight
+ * of 1.0.
* Tensor[1].Dim[0] == Tensor[2].Dim[0]
* * 3: Type:
* Sparse: Value LSHProjectionType_SPARSE(=1).
* Computed bit vector is considered to be sparse.
- * Each output element is an int32 made up of multiple bits computed from
- * hash functions.
+ * Each output element is an int32 made up of multiple bits
+ * computed from hash functions.
*
* Dense: Value LSHProjectionType_DENSE(=2).
- * Computed bit vector is considered to be dense. Each output element
- * represents a bit and can take the value of either 0 or 1.
+ * Computed bit vector is considered to be dense. Each output
+ * element represents a bit and can take the value of either
+ * 0 or 1.
*
* Outputs:
* * 0: If the projection type is sparse:
@@ -720,9 +793,12 @@ typedef enum {
* \f{eqnarray*}{
* i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
* f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
- * C_t =& clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell})& \\
- * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o)& \\
- * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) & if\ there\ is\ a\ projection; \\
+ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
+ * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
+ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
+ * & & \\
+ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
+ * & if\ there\ is\ a\ projection; \\
* h_t =& & \\
* & o_t \odot g(C_t) & otherwise. \\
* \f}
@@ -734,7 +810,8 @@ typedef enum {
* * \f$o_t\f$ is the output,
* * \f$h_t\f$ is the output state,
* * \f$\sigma\f$ is the logistic sigmoid function,
- * * \f$g\f$ is the cell input and cell output activation function, usually \f$tahn\f$,
+ * * \f$g\f$ is the cell input and cell output activation function, usually
+ * \f$tanh\f$,
* * \f$W_{xi}\f$ is the input-to-input weight matrix,
* * \f$W_{hi}\f$ is the recurrent to input weight matrix,
* * \f$W_{ci}\f$ is the cell-to-input weight matrix,
@@ -754,29 +831,32 @@ typedef enum {
* * \f$b_{proj}\f$ is the projection bias,
* * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
* * \f$t_{proj}\f$ is the threshold for clipping the projected output.
- * * \f$\odot\f$ is the <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+ * * \f$\odot\f$ is the
+ * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
* Hadamard product</a> that takes two matrices and produces another
* matrix, each element of which is the product of the corresponding
* elements of the input matrices.
*
* The operation has the following independently optional inputs:
- * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights (\f$W_{hi}\f$),
- * cell-to-input (\f$W_{ci}\f$) weights, and input gate bias (\f$b_i\f$) either all have values,
- * or none of them have values (i.e., all set to null). If they have no
- * values, coupling of input and forget gates (CIFG) is used, in which case
- * the input gate (\f$i_t\f$) is calculated using the following equation instead.
+ * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
+ * (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
+ * bias (\f$b_i\f$) either all have values, or none of them have values
+ * (i.e., all set to null). If they have no values, coupling of input and
+ * forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
+ * is calculated using the following equation instead.
* \f{eqnarray*}{
* i_t = 1 - f_t
* \f}
- * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output
- * weights (\f$W_{co}\f$) either both have values or neither of them have values.
+ * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
+ * (\f$W_{co}\f$) either both have values or neither of them have values.
* If they have values, the peephole optimization is used. Additionally,
* if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
* required to have values for peephole optimization.
- * * The projection weights (\f$W_{proj}\f$) is required only for the recurrent projection
- * layer, and should otherwise have no value.
- * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a value if the
- * recurrent projection layer exists, and should otherwise have no value.
+ * * The projection weights (\f$W_{proj}\f$) are required only for the
+ * recurrent projection layer, and should otherwise have no value.
+ * * The projection bias (\f$b_{proj}\f$) may (but is not required to)
+ * have a value if the recurrent projection layer exists, and should
+ * otherwise have no value.
*
* References:
*
@@ -788,8 +868,8 @@ typedef enum {
* The peephole implementation and projection layer is based on:
* https://research.google.com/pubs/archive/43905.pdf
* Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
- * recurrent neural network architectures for large scale acoustic modeling."
- * INTERSPEECH, 2014.
+ * recurrent neural network architectures for large scale acoustic
+ * modeling." INTERSPEECH, 2014.
* (However, the concept of peephole optimization was introduced in work
* prior to this paper.)
*
@@ -874,18 +954,18 @@ typedef enum {
* <li>4: Tanh;
* <li>6: Sigmoid.
* </ul>
- * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such that values are bound
- * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is
- * disabled.
- * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the projection layer, such
- * that values are bound within [-proj_clip, proj_clip]. If set to 0.0
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
* then clipping is disabled.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
*
* Outputs:
* * 0: The scratch buffer.
* A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units * 4] with CIFG, or [batch_size, num_units * 3]
- * without CIFG.
+ * [batch_size, num_units * 4] with CIFG, or
+ * [batch_size, num_units * 3] without CIFG.
* * 1: The output state (out) (\f$h_t\f$).
* A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
* [batch_size, output_size].
@@ -899,9 +979,11 @@ typedef enum {
*/
ANEURALNETWORKS_LSTM = 16,
- /** Performs an 2-D max pooling operation.
+ /**
+ * Performs a 2-D max pooling operation.
*
- * The output dimensions are functions of the filter dimensions, stride, and padding.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
*
* The values in the output tensor are computed as:
*
@@ -917,56 +999,66 @@ typedef enum {
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the left,
- * in the ‘width’ dimension.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the right,
- * in the ‘width’ dimension.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the top,
- * in the ‘height’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on the bottom,
- * in the ‘height’ dimension.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter width.
- * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter height.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * height.
* * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Inputs (implicit padding):
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit padding scheme,
- * has to be one of the {@link PaddingCode} values.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘width’ dimension.
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when walking
- * through input in the ‘height’ dimension.
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter width.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter height.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
+ * height.
* * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
- * {@link FuseCode} values. Specifies the activation to invoke on the result of
- * each addition.
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
*/
ANEURALNETWORKS_MAX_POOL_2D = 17,
- /** Multiplies two tensors, element-wise.
+ /**
+ * Multiplies two tensors, element-wise.
*
- * Takes two input tensors of identical {@link OperandCode} and compatible dimensions.
- * The output is the product of both input tensors, optionally modified by an
- * activation function.
+ * Takes two input tensors of identical {@link OperandCode} and compatible
+ * dimensions. The output is the product of both input tensors, optionally
+ * modified by an activation function.
*
* Two dimensions are compatible when:
* 1. they are equal, or
* 2. one of them is 1
*
- * The size of the resulting output is the maximum size along each dimension of the
- * input operands. It starts with the trailing dimensions, and works its way forward.
+ * The size of the resulting output is the maximum size along each dimension
+ * of the input operands. It starts with the trailing dimensions, and works
+ * its way forward.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -976,18 +1068,22 @@ typedef enum {
*
* Inputs:
* * 0: A tensor.
- * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions as input0.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the {@link FuseCode}
- * values. Specifies the activation to invoke on the result of each multiplication.
+ * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
* * 0: The product, a tensor of the same {@link OperandCode} as input0.
- * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied: output_scale > input1_scale * input2_scale.
+ * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input1_scale * input2_scale.
*/
ANEURALNETWORKS_MUL = 18,
- /** Computes rectified linear activation on the input tensor element-wise.
+ /**
+ * Computes rectified linear activation on the input tensor element-wise.
*
* The output is calculated using this formula:
*
@@ -1007,7 +1103,8 @@ typedef enum {
*/
ANEURALNETWORKS_RELU = 19,
- /** Computes rectified linear 1 activation on the input tensor element-wise.
+ /**
+ * Computes rectified linear 1 activation on the input tensor element-wise.
*
* The output is calculated using this formula:
*
@@ -1027,7 +1124,8 @@ typedef enum {
*/
ANEURALNETWORKS_RELU1 = 20,
- /** Computes rectified linear 6 activation on the input tensor element-wise.
+ /**
+ * Computes rectified linear 6 activation on the input tensor element-wise.
*
* The output is calculated using this formula:
*
@@ -1047,10 +1145,11 @@ typedef enum {
*/
ANEURALNETWORKS_RELU6 = 21,
- /** Reshapes a tensor.
+ /**
+ * Reshapes a tensor.
*
- * Given tensor, this operation returns a tensor that has the same values as tensor,
- * but with a newly specified shape.
+ * Given a tensor, this operation returns a tensor that has the same
+ * values as the input tensor, but with a newly specified shape.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1060,20 +1159,21 @@ typedef enum {
*
* Inputs:
* * 0: A tensor, specifying the tensor to be reshaped.
- * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the shape
- * of the output tensor. The number of elements implied by shape must be the same
- * as the number of elements in the input tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
+ * shape of the output tensor. The number of elements implied by shape
+ * must be the same as the number of elements in the input tensor.
*
* Outputs:
* * 0: The output tensor, of shape specified by the input shape.
*/
ANEURALNETWORKS_RESHAPE = 22,
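The constraint above as a sketch: the requested shape must imply exactly as
many elements as the input tensor holds (reshape_ok is a hypothetical
helper; negative wildcard dimensions are not modeled).

#include <stdbool.h>
#include <stdint.h>

static bool reshape_ok(const uint32_t* in_dims, uint32_t in_rank,
                       const int32_t* shape, uint32_t out_rank) {
    uint32_t in_elems = 1, out_elems = 1;
    for (uint32_t i = 0; i < in_rank; ++i) in_elems *= in_dims[i];
    for (uint32_t i = 0; i < out_rank; ++i) out_elems *= (uint32_t)shape[i];
    return in_elems == out_elems;
}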
- /** Resizes images to given size using the bilinear interpretation.
+ /**
+ * Resizes images to a given size using bilinear interpolation.
*
- * Resized images must be distorted if their output aspect ratio is not the same as
- * input aspect ratio. The corner pixels of output may not be the same as
- * corner pixels of input.
+ * Resized images will be distorted if their output aspect ratio is not
+ * the same as the input aspect ratio. The corner pixels of the output
+ * may not be the same as the corner pixels of the input.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1081,14 +1181,16 @@ typedef enum {
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output height of the
- * output tensor.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output width of the
- * output tensor.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * width of the output tensor.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
*/
ANEURALNETWORKS_RESIZE_BILINEAR = 23,
@@ -1096,7 +1198,8 @@ typedef enum {
* A basic recurrent neural network layer.
*
* This layer implements the operation:
- * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias)
+ * outputs = state = activation(inputs * input_weights +
+ * state * recurrent_weights + bias)
*
* Where:
* * “input_weights” is a weight matrix that multiplies the inputs;
@@ -1130,9 +1233,9 @@ typedef enum {
* A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
* [batch_size, num_units].
* * 5: fused_activation_function.
- * An optional {@link FuseCode} value indicating the activation
- * function. If “NONE” is specified then it results in a linear
- * activation.
+ * An optional {@link FuseCode} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
*
* Outputs:
* * 0: hidden state (out).
@@ -1146,8 +1249,10 @@ typedef enum {
*/
ANEURALNETWORKS_RNN = 24,
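
    The recurrence above, written out as a single step in plain C for one
    batch element. tanh stands in for the fused activation, and the row-major
    weight layouts follow the operand shapes listed in the doc:

        #include <math.h>

        static void rnn_step(const float* input, int input_size,
                             float* state,                   /* [num_units], in/out */
                             const float* input_weights,     /* [num_units][input_size] */
                             const float* recurrent_weights, /* [num_units][num_units] */
                             const float* bias, int num_units,
                             float* output) {                /* [num_units] */
            for (int u = 0; u < num_units; ++u) {
                float acc = bias[u];
                for (int i = 0; i < input_size; ++i)
                    acc += input[i] * input_weights[u * input_size + i];
                for (int j = 0; j < num_units; ++j)
                    acc += state[j] * recurrent_weights[u * num_units + j];
                output[u] = tanhf(acc);
            }
            /* outputs = state: the new output becomes the next hidden state. */
            for (int u = 0; u < num_units; ++u) state[u] = output[u];
        }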
- /** Computes the softmax activation on the input tensor element-wise, per batch, by
- * normalizing the input vector so the maximum coefficient is zero.
+ /**
+ * Computes the softmax activation on the input tensor element-wise, per
+ * batch, by normalizing the input vector so the maximum coefficient is
+ * zero.
*
* The output is calculated using this formula:
*
@@ -1173,14 +1278,16 @@ typedef enum {
*/
ANEURALNETWORKS_SOFTMAX = 25,
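
    A plain-C sketch of one batch row, with the maximum subtracted first so
    the largest coefficient becomes zero, as described above; beta is the
    scaling input the operation takes:

        #include <math.h>

        static void softmax_row(const float* in, float* out, int n,
                                float beta) {
            float max = in[0];
            for (int i = 1; i < n; ++i)
                if (in[i] > max) max = in[i];
            float sum = 0.0f;
            for (int i = 0; i < n; ++i) {
                out[i] = expf(beta * (in[i] - max));  /* max entry -> exp(0) */
                sum += out[i];
            }
            for (int i = 0; i < n; ++i) out[i] /= sum;
        }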
- /** Rearranges blocks of spatial data, into depth.
+ /**
+ * Rearranges blocks of spatial data into depth.
*
- * More specifically, this op outputs a copy of the input tensor where values from
- * the height and width dimensions are moved to the depth dimension.
- * The value block_size indicates the input block size and how the data is moved.
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the height and width dimensions are moved to the depth
+ * dimension. The value block_size indicates the input block size and how
+ * the data is moved.
*
- * Chunks of data of size block_size * block_size from depth are rearranged into
- * non-overlapping blocks of size block_size x block_size.
+ * Chunks of data of size block_size * block_size from depth are rearranged
+ * into non-overlapping blocks of size block_size x block_size.
*
* The depth of the output tensor is input_depth * block_size * block_size.
* The input tensor's height and width must be divisible by block_size.
@@ -1192,13 +1299,15 @@ typedef enum {
* Supported tensor rank: 4, with "NHWC" data layout.
*
* Inputs:
- * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. block_size
- * must be >=1 and block_size must be a divisor of both the input height and width.
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
+ * block_size must be >= 1 and must be a divisor of both the input
+ * height and width.
*
* Outputs:
- * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
- * depth*block_size*block_size].
+ * * 0: The output 4-D tensor, of shape [batch, height/block_size,
+ * width/block_size, depth*block_size*block_size].
*/
ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
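
    An index-mapping sketch for an NHWC float tensor. The channel ordering
    within a block (row-major over the block, then depth) is assumed from the
    usual TensorFlow convention:

        static void space_to_depth(const float* in, float* out, int batches,
                                   int h, int w, int d, int block) {
            int ow = w / block, od = d * block * block;
            for (int b = 0; b < batches; ++b)
                for (int y = 0; y < h; ++y)
                    for (int x = 0; x < w; ++x)
                        for (int c = 0; c < d; ++c) {
                            int dc = c + d * (x % block + block * (y % block));
                            int dst = ((b * (h / block) + y / block) * ow
                                       + x / block) * od + dc;
                            out[dst] = in[((b * h + y) * w + x) * d + c];
                        }
        }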
@@ -1215,21 +1324,22 @@ typedef enum {
* INTERSPEECH, 2015.
*
* It processes the incoming input using a 2-stage filtering mechanism:
- * * stage 1 performs filtering on the "features" dimension, whose outputs get
- * pushed into a memory of fixed-size memory_size.
+ * * stage 1 performs filtering on the "features" dimension, whose outputs
+ * get pushed into a memory of fixed-size memory_size.
* * stage 2 performs filtering on the "time" dimension of the memory_size
* memoized outputs of stage 1.
*
* Specifically, for rank 1, this layer implements the operation:
*
- * memory = push(conv1d(inputs, weights_feature, feature_dim, "ANEURALNETWORKS_PADDING_VALID"));
+ * memory = push(conv1d(inputs, weights_feature, feature_dim,
+ * "ANEURALNETWORKS_PADDING_VALID"));
* outputs = activation(memory * weights_time + bias);
*
* Where:
* * “weights_feature” is a weights matrix that processes the inputs (by
- * convolving the input with every “feature filter”), and whose outputs get
- * pushed, stacked in order, into the fixed-size “memory” (the oldest entry
- * gets dropped);
+ * convolving the input with every “feature filter”), and whose outputs
+ * get pushed, stacked in order, into the fixed-size “memory” (the oldest
+ * entry gets dropped);
* * “weights_time” is a weights matrix that processes the “memory” (by a
* batched matrix multiplication on the num_units);
* * “bias” is an optional bias vector (added to each output vector in the
@@ -1265,8 +1375,9 @@ typedef enum {
* * 5: rank.
* The rank of the SVD approximation.
* * 6: fused_activation_function.
- * An optional {@link FuseCode} value indicating the activation function.
- * If “NONE” is specified then it results in a linear activation.
+ * An optional {@link FuseCode} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
*
* Outputs:
* * 0: state (out).
@@ -1278,7 +1389,8 @@ typedef enum {
*/
ANEURALNETWORKS_SVDF = 27,
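
    A heavily simplified single-batch, rank-1 sketch of the two stages
    described above; the per-unit memory layout and the omission of the fused
    activation and the rank-k case are assumptions for illustration only:

        static void svdf_step(const float* input, int input_size,
                              float* memory, int memory_size,  /* per unit, oldest first */
                              const float* weights_feature,    /* [num_units][input_size] */
                              const float* weights_time,       /* [num_units][memory_size] */
                              const float* bias, int num_units,
                              float* output) {
            for (int u = 0; u < num_units; ++u) {
                /* Stage 1: feature filtering; push result, drop oldest. */
                float feat = 0.0f;
                for (int i = 0; i < input_size; ++i)
                    feat += input[i] * weights_feature[u * input_size + i];
                float* mem = memory + u * memory_size;
                for (int t = 0; t + 1 < memory_size; ++t) mem[t] = mem[t + 1];
                mem[memory_size - 1] = feat;
                /* Stage 2: time filtering over the memoized outputs. */
                float acc = bias ? bias[u] : 0.0f;
                for (int t = 0; t < memory_size; ++t)
                    acc += mem[t] * weights_time[u * memory_size + t];
                output[u] = acc;  /* fused activation would be applied here */
            }
        }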
- /** Computes hyperbolic tangent of input tensor element-wise.
+ /**
+ * Computes hyperbolic tangent of input tensor element-wise.
*
* The output is calculated using this formula:
*
@@ -1303,9 +1415,10 @@ typedef enum {
/**
* BatchToSpace for N-dimensional tensors.
*
- * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape
- * block_shape + [batch], interleaves these blocks back into the grid defined by the
- * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input.
+ * This operation reshapes the batch dimension (dimension 0) into M + 1
+ * dimensions of shape block_shape + [batch], interleaves these blocks back
+ * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
+ * result with the same rank as the input.
*
* This is the reverse of SpaceToBatch.
*
@@ -1317,8 +1430,9 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be reshaped
- * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block sizes
- * for each spatial dimension of the input tensor. All values must be >= 1.
+ * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
@@ -1328,16 +1442,17 @@ typedef enum {
/**
* Element-wise division of two tensors.
*
- * Takes two input tensors of identical {@link OperandCode} and compatible dimensions. The output
- * is the result of dividing the first input tensor by the second, optionally
- * modified by an activation function.
+ * Takes two input tensors of identical {@link OperandCode} and compatible
+ * dimensions. The output is the result of dividing the first input tensor
+ * by the second, optionally modified by an activation function.
*
* Two dimensions are compatible when:
* 1. they are equal, or
* 2. one of them is 1
*
- * The size of the output is the maximum size along each dimension of the input operands.
- * It starts with the trailing dimensions, and works its way forward.
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
*
* Example:
* input1.dimension = {4, 1, 2}
@@ -1351,9 +1466,11 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the first input.
- * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions as input0.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the {@link FuseCode}
- * values. Specifies the activation to invoke on the result of each division.
+ * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
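
    The compatibility rule above (equal, or one of them is 1, matched from
    the trailing dimension) can be checked with a small helper; this sketch
    reproduces the {4, 1, 2} x {5, 4, 3, 1} -> {5, 4, 3, 2} example:

        #include <stdbool.h>
        #include <stdint.h>

        static bool broadcast_shape(const uint32_t* a, int rankA,
                                    const uint32_t* b, int rankB,
                                    uint32_t* out) { /* [max(rankA, rankB)] */
            int rankOut = rankA > rankB ? rankA : rankB;
            for (int i = 1; i <= rankOut; ++i) {     /* trailing dims first */
                uint32_t da = i <= rankA ? a[rankA - i] : 1;
                uint32_t db = i <= rankB ? b[rankB - i] : 1;
                if (da != db && da != 1 && db != 1) return false;
                out[rankOut - i] = da > db ? da : db; /* max along each dim */
            }
            return true;
        }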
@@ -1363,12 +1480,13 @@ typedef enum {
/**
* Computes the mean of elements across dimensions of a tensor.
*
- * Reduces the input tensor along the given dimensions to reduce. Unless keep_dims
- * is true, the rank of the tensor is reduced by 1 for each entry in axis.
- * If keep_dims is true, the reduced dimensions are retained with length 1.
+ * Reduces the input tensor along the given dimensions. Unless keep_dims
+ * is true, the rank of the tensor is reduced by 1 for each entry in axis.
+ * If keep_dims is true, the reduced dimensions are retained with length 1.
*
- * If dimensions to reduce have no entries, all dimensions are reduced, and a tensor with
- * a single element is returned.
+ * If dimensions to reduce have no entries, all dimensions are reduced, and
+ * a tensor with a single element is returned.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1378,11 +1496,11 @@ typedef enum {
*
* Inputs:
* * 0: A tensor, specifying the input.
- * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions to
- * reduce. If None (the default), reduces all dimensions. Must be in the range
- * [-rank(input_tensor), rank(input_tensor)).
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive, retains reduced
- * dimensions with length 1.
+ * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. If None (the default), reduces all dimensions. Must be in
+ * the range [-rank(input_tensor), rank(input_tensor)).
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
+ * retains reduced dimensions with length 1.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
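
    A sketch of the resulting shape under the rules above; axes are assumed
    already normalized to [0, rank):

        #include <stdint.h>

        static int mean_out_shape(const uint32_t* inDims, int rank,
                                  const int32_t* axes, int numAxes,
                                  int keepDims, uint32_t* outDims) {
            int outRank = 0;
            for (int d = 0; d < rank; ++d) {
                int reduced = 0;
                for (int a = 0; a < numAxes; ++a)
                    if (axes[a] == d) reduced = 1;
                if (!reduced) outDims[outRank++] = inDims[d];
                else if (keepDims) outDims[outRank++] = 1; /* kept, length 1 */
            }
            return outRank;  /* 0 when every dimension was reduced */
        }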
@@ -1402,11 +1520,13 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be padded.
- * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings for each
- * spatial dimension of the input tensor. The shape of the tensor must be
- * {rank(input0), 2}.
- * padding[i, 0] specifies the number of element to be padded in the front of dimension i.
- * padding[i, 1] specifies the number of element to be padded after the end of dimension i.
+ * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
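
    The output shape follows directly from the paddings tensor described
    above: out[i] = padding[i, 0] + in[i] + padding[i, 1]. As a sketch:

        #include <stdint.h>

        static void pad_out_shape(const uint32_t* inDims, int rank,
                                  const int32_t* paddings, /* [rank][2], flat */
                                  uint32_t* outDims) {
            for (int i = 0; i < rank; ++i)
                outDims[i] = (uint32_t)paddings[2 * i] + inDims[i]
                           + (uint32_t)paddings[2 * i + 1];
        }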
@@ -1417,12 +1537,13 @@ typedef enum {
/**
* SpaceToBatch for N-Dimensional tensors.
*
- * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
- * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
- * in the output, the spatial dimensions [1, ..., M] correspond to the position within the grid,
- * and the batch dimension combines both the position within a spatial block and the original
- * batch position. Prior to division into blocks, the spatial dimensions of the input are
- * optionally zero padded according to paddings.
+ * This operation divides "spatial" dimensions [1, ..., M] of the input into
+ * a grid of blocks of shape block_shape, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial dimensions
+ * [1, ..., M] correspond to the position within the grid, and the batch
+ * dimension combines both the position within a spatial block and the
+ * original batch position. Prior to division into blocks, the spatial
+ * dimensions of the input are optionally zero padded according to paddings.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1432,13 +1553,16 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the input.
- * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block sizes for each
- * spatial dimension of the input tensor. All values must be >= 1.
- * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings for each spatial
- * dimension of the input tensor. All values must be >= 0. The shape of the tensor must be
- * {rank(input0), 2}.
- * padding[i, 0] specifies the number of element to be padded in the front of dimension i.
- * padding[i, 1] specifies the number of element to be padded after the end of dimension i.
+ * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. All values must be
+ * >= 0. The shape of the tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
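
    A shape-only sketch for the NHWC case above (M = 2 spatial dimensions):
    each padded spatial dimension is divided by its block size, the batch
    dimension absorbs the block volume, and depth passes through unchanged.
    The helper is illustrative:

        #include <stdint.h>

        static void space_to_batch_shape(const uint32_t in[4],  /* NHWC */
                                         const int32_t block[2],
                                         const int32_t pad[4],  /* [2][2] */
                                         uint32_t out[4]) {
            out[0] = in[0] * (uint32_t)(block[0] * block[1]);
            out[1] = (in[1] + pad[0] + pad[1]) / (uint32_t)block[0];
            out[2] = (in[2] + pad[2] + pad[3]) / (uint32_t)block[1];
            out[3] = in[3];  /* depth is untouched */
        }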
@@ -1448,9 +1572,10 @@ typedef enum {
/**
* Removes dimensions of size 1 from the shape of a tensor.
*
- * Given a tensor input, this operation returns a tensor of the same {@link OperandCode}
- * with all dimensions of size 1 removed. If you don't want to remove all size 1
- * dimensions, you can remove specific size 1 dimensions by specifying the axes (input1).
+ * Given a tensor input, this operation returns a tensor of the same
+ * {@link OperandCode} with all dimensions of size 1 removed. If you don't
+ * want to remove all size 1 dimensions, you can remove specific size 1
+ * dimensions by specifying the axes (input1).
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1460,24 +1585,27 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, the tensor to be squeezed.
- * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions to
- * squeeze. If specified only squeezes the dimensions listed. Otherwise, squeezes all
- * dimensions. The dimension index starts at 0. An error must be reported if squeezing a
- * dimension that is not 1.
+ * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
+ * dimensions to squeeze. If specified only squeezes the dimensions
+ * listed. Otherwise, squeezes all dimensions. The dimension index
+ * starts at 0. An error must be reported if squeezing a dimension that
+ * is not 1.
*
* Outputs:
- * * 0: A tensor of the same {@link OperandCode} as input0. Contains the same data as input,
- * but has one or more dimensions of size 1 removed.
+ * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
+ * same data as input, but has one or more dimensions of size 1
+ * removed.
*/
ANEURALNETWORKS_SQUEEZE = 34,
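
    With no axes given, squeezing is purely a shape change; a sketch:

        #include <stdint.h>

        static int squeeze_all(const uint32_t* inDims, int rank,
                               uint32_t* outDims) {
            int outRank = 0;
            for (int i = 0; i < rank; ++i)
                if (inDims[i] != 1) outDims[outRank++] = inDims[i];
            return outRank;  /* the data itself is unchanged */
        }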
/**
* Extracts a strided slice of a tensor.
*
- * Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given
- * input tensor. Starting at the location specified by begin the slice continues by adding
- * stride to the index until all dimensions are not less than end. Note that a stride can
- * be negative, which causes a reverse slice.
+ * Roughly speaking, this op extracts a slice of size (end - begin) / stride
+ * from the given input tensor. Starting at the location specified by
+ * begin, the slice continues by adding stride to the index until all
+ * dimensions are not less than end. Note that a stride can be negative,
+ * which causes a reverse slice.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
@@ -1487,21 +1615,25 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be sliced.
- * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of the
- * dimensions of the input tensor to be sliced. The length must be of rank(input0).
- * * 2: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of the
- * dimensions of the input tensor to be sliced. The length must be of rank(input0).
- * * 3: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of the
- * dimensions of the input tensor to be sliced. The length must be of rank(input0).
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit of begin_mask
- * is set, begin[i] is ignored and the fullest possible range in that dimension is
- * used instead.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of end_mask is set,
- * end[i] is ignored and the fullest possible range in that dimension is used instead.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32 mask. If the ith
- * bit of shrink_axis_mask is set, it implies that the ith specification shrinks the
- * dimensionality by 1. A slice of size 1 starting from begin[i] in the dimension must
- * be preserved.
+ * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
+ * the dimensions of the input tensor to be sliced. The length must be
+ * of rank(input0).
+ * * 2: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
+ * the dimensions of the input tensor to be sliced. The length must be
+ * of rank(input0).
+ * * 3: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
+ * the dimensions of the input tensor to be sliced. The length must be
+ * of rank(input0).
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
+ * of begin_mask is set, begin[i] is ignored and the fullest possible
+ * range in that dimension is used instead.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
+ * end_mask is set, end[i] is ignored and the fullest possible range in
+ * that dimension is used instead.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
+ * mask. If the ith bit of shrink_axis_mask is set, it implies that the
+ * ith specification shrinks the dimensionality by 1. A slice of size 1
+ * starting from begin[i] in the dimension must be preserved.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
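
    The begin/end/stride rule above, reduced to one dimension and ignoring
    the three masks; a negative stride walks backwards, producing a reverse
    slice:

        static int strided_slice_1d(const float* in, float* out,
                                    int begin, int end, int stride) {
            int n = 0;
            if (stride > 0)
                for (int i = begin; i < end; i += stride) out[n++] = in[i];
            else
                for (int i = begin; i > end; i += stride) out[n++] = in[i];
            return n;  /* number of elements copied */
        }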
@@ -1511,16 +1643,17 @@ typedef enum {
/**
* Element-wise subtraction of two tensors.
*
- * Takes two input tensors of identical {@link OperandCode} and compatible dimensions.
- * The output is the result of subtracting the second input tensor from the first one,
- * optionally modified by an activation function.
+ * Takes two input tensors of identical {@link OperandCode} and compatible
+ * dimensions. The output is the result of subtracting the second input
+ * tensor from the first one, optionally modified by an activation function.
*
* Two dimensions are compatible when:
* 1. they are equal, or
* 2. one of them is 1
*
- * The size of the output is the maximum size along each dimension of the input operands.
- * It starts with the trailing dimensions, and works its way forward.
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
*
* Example:
* input1.dimension = {4, 1, 2}
@@ -1534,9 +1667,11 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the first input.
- * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions as input0.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the {@link FuseCode}
- * values. Specifies the activation to invoke on the result of each subtraction.
+ * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
@@ -1544,11 +1679,13 @@ typedef enum {
ANEURALNETWORKS_SUB = 36,
/**
- * Transposes the input tensor, permuting the dimensions according to the perm tensor.
+ * Transposes the input tensor, permuting the dimensions according to the
+ * perm tensor.
*
- * The returned tensor's dimension i corresponds to the input dimension perm[i].
- * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor.
- * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors.
+ * The returned tensor's dimension i corresponds to the input dimension
+ * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
+ * rank of the input tensor. Hence by default, this operation performs a
+ * regular matrix transpose on 2-D input tensors.
*
* Supported tensor {@link OperandCode}:
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
diff --git a/nn/tools/sync_enums_to_hal.py b/nn/tools/sync_enums_to_hal.py
index f73d95947..955c367f3 100755
--- a/nn/tools/sync_enums_to_hal.py
+++ b/nn/tools/sync_enums_to_hal.py
@@ -226,6 +226,7 @@ if __name__ == '__main__':
hal.append(" /**\n")
line = line.replace("/** ", " * ")
# Match naming changes in HAL vs framework
+ line = line.replace("@link OperandCode", "@link OperandType")
line = line.replace("@link ANEURALNETWORKS_", "@link OperandType::")
line = line.replace("ANEURALNETWORKS_", "")
line = line.replace("FuseCode", "FusedActivationFunc")