aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--BUILD.bazel5
-rw-r--r--CMakeLists.txt5
-rw-r--r--include/xnnpack.h129
-rw-r--r--src/runtime.c154
-rw-r--r--src/subgraph-strings.c10
-rw-r--r--src/subgraph/divide.c89
-rw-r--r--src/subgraph/maximum2.c64
-rw-r--r--src/subgraph/minimum2.c64
-rw-r--r--src/subgraph/squared-difference.c64
-rw-r--r--src/subgraph/subtract.c89
-rw-r--r--src/xnnpack/subgraph.h7
11 files changed, 679 insertions, 1 deletion
diff --git a/BUILD.bazel b/BUILD.bazel
index 7c8297d2a..c8f31f70d 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -79,14 +79,19 @@ SUBGRAPH_SRCS = [
"src/subgraph/convolution-2d.c",
"src/subgraph/deconvolution-2d.c",
"src/subgraph/depthwise-convolution-2d.c",
+ "src/subgraph/divide.c",
"src/subgraph/fully-connected.c",
"src/subgraph/hardswish.c",
"src/subgraph/max-pooling-2d.c",
+ "src/subgraph/maximum2.c",
+ "src/subgraph/minimum2.c",
"src/subgraph/multiply2.c",
"src/subgraph/prelu.c",
"src/subgraph/sigmoid.c",
"src/subgraph/softmax.c",
+    "src/subgraph/squared-difference.c",
    "src/subgraph/static-constant-pad.c",
+    "src/subgraph/subtract.c",
"src/subgraph/unpooling-2d.c",
]
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f88c7f512..de584eb3a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -186,14 +186,19 @@ SET(XNNPACK_SUBGRAPH_SRCS
src/subgraph/convolution-2d.c
src/subgraph/deconvolution-2d.c
src/subgraph/depthwise-convolution-2d.c
+ src/subgraph/divide.c
src/subgraph/fully-connected.c
src/subgraph/hardswish.c
src/subgraph/max-pooling-2d.c
+ src/subgraph/maximum2.c
+ src/subgraph/minimum2.c
src/subgraph/multiply2.c
src/subgraph/prelu.c
src/subgraph/sigmoid.c
src/subgraph/softmax.c
+  src/subgraph/squared-difference.c
  src/subgraph/static-constant-pad.c
+  src/subgraph/subtract.c
src/subgraph/unpooling-2d.c)
SET(XNNPACK_LOGGING_SRCS
diff --git a/include/xnnpack.h b/include/xnnpack.h
index a89c84c84..705af8730 100644
--- a/include/xnnpack.h
+++ b/include/xnnpack.h
@@ -612,6 +612,135 @@ enum xnn_status xnn_define_multiply2(
uint32_t output_id,
uint32_t flags);
+/// Define a Subtract Node and add it to a Subgraph.
+///
+/// The Subtract Node computes elementwise subtraction of two tensor inputs with numpy broadcasting rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
+/// of the two inputs.
+/// @param flags - binary features of the Subtract Node. No supported flags are currently defined.
+enum xnn_status xnn_define_subtract(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Divide Node and add it to a Subgraph.
+///
+/// The Divide Node computes elementwise division of two tensor inputs with numpy broadcasting rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
+/// of the two inputs.
+/// @param flags - binary features of the Divide Node. No supported flags are currently defined.
+enum xnn_status xnn_define_divide(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2-Input Maximum Node and add it to a Subgraph.
+///
+/// The 2-Input Maximum Node computes elementwise maximum of two tensor inputs with numpy broadcasting rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
+/// of the two inputs.
+/// @param flags - binary features of the Maximum Node. No supported flags are currently defined.
+enum xnn_status xnn_define_maximum2(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2-Input Minimum Node and add it to a Subgraph.
+///
+/// The 2-Input Minimum Node computes elementwise minimum of two tensor inputs with numpy broadcasting rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
+/// of the two inputs.
+/// @param flags - binary features of the Minimum Node. No supported flags are currently defined.
+enum xnn_status xnn_define_minimum2(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Squared Difference Node and add it to a Subgraph.
+///
+/// The Squared Difference Node computes elementwise squared difference of two tensor inputs with numpy broadcasting
+/// rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the first
+/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along
+/// that dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension
+/// of the two inputs.
+/// @param flags - binary features of the Squared Difference Node. No supported flags are currently defined.
+enum xnn_status xnn_define_squared_difference(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
/// Define a Constant Pad Node with static padding specification and add it to a Subgraph.
///
/// @param subgraph - a Subgraph object that will own the created Node.
diff --git a/src/runtime.c b/src/runtime.c
index 1f5ca73b3..7b57d4696 100644
--- a/src/runtime.c
+++ b/src/runtime.c
@@ -283,6 +283,23 @@ enum xnn_status xnn_create_runtime_v2(
runtime->opdata[i].inputs[0] = node->inputs[0];
runtime->opdata[i].outputs[0] = node->outputs[0];
break;
+ case xnn_node_type_divide:
+ status = xnn_create_divide_nd_f32(
+ node->activation.output_min,
+ node->activation.output_max,
+ node->flags,
+ &runtime->opdata[i].operator_object);
+ if (status != xnn_status_success) {
+ goto error;
+ }
+ runtime->opdata[i].shape1.num_dims = values[node->inputs[0]].shape.num_dims;
+ runtime->opdata[i].shape2.num_dims = values[node->inputs[1]].shape.num_dims;
+ memcpy(runtime->opdata[i].shape1.dim, values[node->inputs[0]].shape.dim, values[node->inputs[0]].shape.num_dims * sizeof(size_t));
+ memcpy(runtime->opdata[i].shape2.dim, values[node->inputs[1]].shape.dim, values[node->inputs[1]].shape.num_dims * sizeof(size_t));
+ runtime->opdata[i].inputs[0] = node->inputs[0];
+ runtime->opdata[i].inputs[1] = node->inputs[1];
+ runtime->opdata[i].outputs[0] = node->outputs[0];
+ break;
case xnn_node_type_fully_connected:
{
const size_t num_input_elements = product_all_dims(&values[node->inputs[0]].shape);
@@ -349,6 +366,36 @@ enum xnn_status xnn_create_runtime_v2(
runtime->opdata[i].inputs[0] = node->inputs[0];
runtime->opdata[i].outputs[0] = node->outputs[0];
break;
+ case xnn_node_type_maximum2:
+ status = xnn_create_maximum_nd_f32(
+ node->flags,
+ &runtime->opdata[i].operator_object);
+ if (status != xnn_status_success) {
+ goto error;
+ }
+ runtime->opdata[i].shape1.num_dims = values[node->inputs[0]].shape.num_dims;
+ runtime->opdata[i].shape2.num_dims = values[node->inputs[1]].shape.num_dims;
+ memcpy(runtime->opdata[i].shape1.dim, values[node->inputs[0]].shape.dim, values[node->inputs[0]].shape.num_dims * sizeof(size_t));
+ memcpy(runtime->opdata[i].shape2.dim, values[node->inputs[1]].shape.dim, values[node->inputs[1]].shape.num_dims * sizeof(size_t));
+ runtime->opdata[i].inputs[0] = node->inputs[0];
+ runtime->opdata[i].inputs[1] = node->inputs[1];
+ runtime->opdata[i].outputs[0] = node->outputs[0];
+ break;
+ case xnn_node_type_minimum2:
+ status = xnn_create_minimum_nd_f32(
+ node->flags,
+ &runtime->opdata[i].operator_object);
+ if (status != xnn_status_success) {
+ goto error;
+ }
+ runtime->opdata[i].shape1.num_dims = values[node->inputs[0]].shape.num_dims;
+ runtime->opdata[i].shape2.num_dims = values[node->inputs[1]].shape.num_dims;
+ memcpy(runtime->opdata[i].shape1.dim, values[node->inputs[0]].shape.dim, values[node->inputs[0]].shape.num_dims * sizeof(size_t));
+ memcpy(runtime->opdata[i].shape2.dim, values[node->inputs[1]].shape.dim, values[node->inputs[1]].shape.num_dims * sizeof(size_t));
+ runtime->opdata[i].inputs[0] = node->inputs[0];
+ runtime->opdata[i].inputs[1] = node->inputs[1];
+ runtime->opdata[i].outputs[0] = node->outputs[0];
+ break;
case xnn_node_type_multiply2:
status = xnn_create_multiply_nd_f32(
node->activation.output_min,
@@ -409,6 +456,38 @@ enum xnn_status xnn_create_runtime_v2(
runtime->opdata[i].inputs[0] = node->inputs[0];
runtime->opdata[i].outputs[0] = node->outputs[0];
break;
+ case xnn_node_type_squared_difference:
+ status = xnn_create_squared_difference_nd_f32(
+ node->flags,
+ &runtime->opdata[i].operator_object);
+ if (status != xnn_status_success) {
+ goto error;
+ }
+ runtime->opdata[i].shape1.num_dims = values[node->inputs[0]].shape.num_dims;
+ runtime->opdata[i].shape2.num_dims = values[node->inputs[1]].shape.num_dims;
+ memcpy(runtime->opdata[i].shape1.dim, values[node->inputs[0]].shape.dim, values[node->inputs[0]].shape.num_dims * sizeof(size_t));
+ memcpy(runtime->opdata[i].shape2.dim, values[node->inputs[1]].shape.dim, values[node->inputs[1]].shape.num_dims * sizeof(size_t));
+ runtime->opdata[i].inputs[0] = node->inputs[0];
+ runtime->opdata[i].inputs[1] = node->inputs[1];
+ runtime->opdata[i].outputs[0] = node->outputs[0];
+ break;
+ case xnn_node_type_subtract:
+ status = xnn_create_subtract_nd_f32(
+ node->activation.output_min,
+ node->activation.output_max,
+ node->flags,
+ &runtime->opdata[i].operator_object);
+ if (status != xnn_status_success) {
+ goto error;
+ }
+ runtime->opdata[i].shape1.num_dims = values[node->inputs[0]].shape.num_dims;
+ runtime->opdata[i].shape2.num_dims = values[node->inputs[1]].shape.num_dims;
+ memcpy(runtime->opdata[i].shape1.dim, values[node->inputs[0]].shape.dim, values[node->inputs[0]].shape.num_dims * sizeof(size_t));
+ memcpy(runtime->opdata[i].shape2.dim, values[node->inputs[1]].shape.dim, values[node->inputs[1]].shape.num_dims * sizeof(size_t));
+ runtime->opdata[i].inputs[0] = node->inputs[0];
+ runtime->opdata[i].inputs[1] = node->inputs[1];
+ runtime->opdata[i].outputs[0] = node->outputs[0];
+ break;
case xnn_node_type_unpooling_2d:
status = xnn_create_unpooling2d_nhwc_x32(
node->params.pooling_2d.padding_top,
@@ -623,6 +702,21 @@ enum xnn_status xnn_setup_runtime(
runtime->blobs[opdata->outputs[0]].data,
runtime->threadpool);
break;
+ case xnn_operator_type_divide_nd_f32:
+ assert(runtime->blobs[opdata->inputs[0]].data != NULL);
+ assert(runtime->blobs[opdata->inputs[1]].data != NULL);
+ assert(runtime->blobs[opdata->outputs[0]].data != NULL);
+ status = xnn_setup_divide_nd_f32(
+ opdata->operator_object,
+ opdata->shape1.num_dims,
+ opdata->shape1.dim,
+ opdata->shape2.num_dims,
+ opdata->shape2.dim,
+ runtime->blobs[opdata->inputs[0]].data,
+ runtime->blobs[opdata->inputs[1]].data,
+ runtime->blobs[opdata->outputs[0]].data,
+ runtime->threadpool);
+ break;
case xnn_operator_type_fully_connected_nc_f32:
assert(runtime->blobs[opdata->inputs[0]].data != NULL);
assert(runtime->blobs[opdata->outputs[0]].data != NULL);
@@ -655,6 +749,36 @@ enum xnn_status xnn_setup_runtime(
runtime->blobs[opdata->outputs[0]].data,
runtime->threadpool);
break;
+ case xnn_operator_type_maximum_nd_f32:
+ assert(runtime->blobs[opdata->inputs[0]].data != NULL);
+ assert(runtime->blobs[opdata->inputs[1]].data != NULL);
+ assert(runtime->blobs[opdata->outputs[0]].data != NULL);
+ status = xnn_setup_maximum_nd_f32(
+ opdata->operator_object,
+ opdata->shape1.num_dims,
+ opdata->shape1.dim,
+ opdata->shape2.num_dims,
+ opdata->shape2.dim,
+ runtime->blobs[opdata->inputs[0]].data,
+ runtime->blobs[opdata->inputs[1]].data,
+ runtime->blobs[opdata->outputs[0]].data,
+ runtime->threadpool);
+ break;
+ case xnn_operator_type_minimum_nd_f32:
+ assert(runtime->blobs[opdata->inputs[0]].data != NULL);
+ assert(runtime->blobs[opdata->inputs[1]].data != NULL);
+ assert(runtime->blobs[opdata->outputs[0]].data != NULL);
+ status = xnn_setup_minimum_nd_f32(
+ opdata->operator_object,
+ opdata->shape1.num_dims,
+ opdata->shape1.dim,
+ opdata->shape2.num_dims,
+ opdata->shape2.dim,
+ runtime->blobs[opdata->inputs[0]].data,
+ runtime->blobs[opdata->inputs[1]].data,
+ runtime->blobs[opdata->outputs[0]].data,
+ runtime->threadpool);
+ break;
case xnn_operator_type_multiply_nd_f32:
assert(runtime->blobs[opdata->inputs[0]].data != NULL);
assert(runtime->blobs[opdata->inputs[1]].data != NULL);
@@ -700,6 +824,36 @@ enum xnn_status xnn_setup_runtime(
runtime->blobs[opdata->outputs[0]].data,
runtime->threadpool);
break;
+ case xnn_operator_type_squared_difference_nd_f32:
+ assert(runtime->blobs[opdata->inputs[0]].data != NULL);
+ assert(runtime->blobs[opdata->inputs[1]].data != NULL);
+ assert(runtime->blobs[opdata->outputs[0]].data != NULL);
+ status = xnn_setup_squared_difference_nd_f32(
+ opdata->operator_object,
+ opdata->shape1.num_dims,
+ opdata->shape1.dim,
+ opdata->shape2.num_dims,
+ opdata->shape2.dim,
+ runtime->blobs[opdata->inputs[0]].data,
+ runtime->blobs[opdata->inputs[1]].data,
+ runtime->blobs[opdata->outputs[0]].data,
+ runtime->threadpool);
+ break;
+ case xnn_operator_type_subtract_nd_f32:
+ assert(runtime->blobs[opdata->inputs[0]].data != NULL);
+ assert(runtime->blobs[opdata->inputs[1]].data != NULL);
+ assert(runtime->blobs[opdata->outputs[0]].data != NULL);
+ status = xnn_setup_subtract_nd_f32(
+ opdata->operator_object,
+ opdata->shape1.num_dims,
+ opdata->shape1.dim,
+ opdata->shape2.num_dims,
+ opdata->shape2.dim,
+ runtime->blobs[opdata->inputs[0]].data,
+ runtime->blobs[opdata->inputs[1]].data,
+ runtime->blobs[opdata->outputs[0]].data,
+ runtime->threadpool);
+ break;
case xnn_operator_type_unpooling_nhwc_x32:
assert(runtime->blobs[opdata->inputs[0]].data != NULL);
assert(runtime->blobs[opdata->inputs[1]].data != NULL);
diff --git a/src/subgraph-strings.c b/src/subgraph-strings.c
index f141cd341..0417f0bf9 100644
--- a/src/subgraph-strings.c
+++ b/src/subgraph-strings.c
@@ -34,10 +34,16 @@ const char* xnn_node_type_to_string(enum xnn_node_type type) {
return "Deconvolution 2D";
case xnn_node_type_depthwise_convolution_2d:
return "Depthwise Convolution 2D";
+ case xnn_node_type_divide:
+ return "Divide";
case xnn_node_type_fully_connected:
return "Fully Connected";
case xnn_node_type_hardswish:
return "HardSwish";
+ case xnn_node_type_maximum2:
+ return "Maximum2";
+ case xnn_node_type_minimum2:
+ return "Minimum2";
case xnn_node_type_multiply2:
return "Multiply2";
case xnn_node_type_max_pooling_2d:
@@ -48,6 +54,10 @@ const char* xnn_node_type_to_string(enum xnn_node_type type) {
return "Sigmoid";
case xnn_node_type_softmax:
return "Softmax";
+ case xnn_node_type_squared_difference:
+ return "Squared Difference";
+ case xnn_node_type_subtract:
+ return "Subtract";
case xnn_node_type_unpooling_2d:
return "Unpooling 2D";
}
diff --git a/src/subgraph/divide.c b/src/subgraph/divide.c
new file mode 100644
index 000000000..a8bf43229
--- /dev/null
+++ b/src/subgraph/divide.c
@@ -0,0 +1,89 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack.h>
+#include <xnnpack/log.h>
+#include <xnnpack/params.h>
+#include <xnnpack/subgraph.h>
+
+
+enum xnn_status xnn_define_divide(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags)
+{
+ if (!xnn_params.initialized) {
+ xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+ xnn_node_type_to_string(xnn_node_type_divide));
+ return xnn_status_uninitialized;
+ }
+
+ if (isnan(output_min)) {
+ xnn_log_error(
+ "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+ xnn_node_type_to_string(xnn_node_type_divide));
+ return xnn_status_invalid_parameter;
+ }
+
+ if (isnan(output_max)) {
+ xnn_log_error(
+ "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+ xnn_node_type_to_string(xnn_node_type_divide));
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_min >= output_max) {
+ xnn_log_error(
+ "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+ xnn_node_type_to_string(xnn_node_type_divide), output_min, output_max);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input1_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_divide), input1_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input2_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_divide), input2_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_divide), output_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ struct xnn_node* node = xnn_subgraph_new_node(subgraph);
+ if (node == NULL) {
+ return xnn_status_out_of_memory;
+ }
+
+ node->type = xnn_node_type_divide;
+ node->activation.output_min = output_min;
+ node->activation.output_max = output_max;
+ node->num_inputs = 2;
+ node->inputs[0] = input1_id;
+ node->inputs[1] = input2_id;
+ node->num_outputs = 1;
+ node->outputs[0] = output_id;
+ node->flags = flags;
+
+ return xnn_status_success;
+}
diff --git a/src/subgraph/maximum2.c b/src/subgraph/maximum2.c
new file mode 100644
index 000000000..f8fd7970c
--- /dev/null
+++ b/src/subgraph/maximum2.c
@@ -0,0 +1,64 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack.h>
+#include <xnnpack/log.h>
+#include <xnnpack/params.h>
+#include <xnnpack/subgraph.h>
+
+
+enum xnn_status xnn_define_maximum2(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags)
+{
+ if (!xnn_params.initialized) {
+ xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+ xnn_node_type_to_string(xnn_node_type_maximum2));
+ return xnn_status_uninitialized;
+ }
+
+ if (input1_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_maximum2), input1_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input2_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_maximum2), input2_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_maximum2), output_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ struct xnn_node* node = xnn_subgraph_new_node(subgraph);
+ if (node == NULL) {
+ return xnn_status_out_of_memory;
+ }
+
+ node->type = xnn_node_type_maximum2;
+ node->num_inputs = 2;
+ node->inputs[0] = input1_id;
+ node->inputs[1] = input2_id;
+ node->num_outputs = 1;
+ node->outputs[0] = output_id;
+ node->flags = flags;
+
+ return xnn_status_success;
+}
diff --git a/src/subgraph/minimum2.c b/src/subgraph/minimum2.c
new file mode 100644
index 000000000..f0c27549d
--- /dev/null
+++ b/src/subgraph/minimum2.c
@@ -0,0 +1,64 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack.h>
+#include <xnnpack/log.h>
+#include <xnnpack/params.h>
+#include <xnnpack/subgraph.h>
+
+
+enum xnn_status xnn_define_minimum2(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags)
+{
+ if (!xnn_params.initialized) {
+ xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+ xnn_node_type_to_string(xnn_node_type_minimum2));
+ return xnn_status_uninitialized;
+ }
+
+ if (input1_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_minimum2), input1_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input2_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_minimum2), input2_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_minimum2), output_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ struct xnn_node* node = xnn_subgraph_new_node(subgraph);
+ if (node == NULL) {
+ return xnn_status_out_of_memory;
+ }
+
+ node->type = xnn_node_type_minimum2;
+ node->num_inputs = 2;
+ node->inputs[0] = input1_id;
+ node->inputs[1] = input2_id;
+ node->num_outputs = 1;
+ node->outputs[0] = output_id;
+ node->flags = flags;
+
+ return xnn_status_success;
+}
diff --git a/src/subgraph/squared-difference.c b/src/subgraph/squared-difference.c
new file mode 100644
index 000000000..59d5ed509
--- /dev/null
+++ b/src/subgraph/squared-difference.c
@@ -0,0 +1,64 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack.h>
+#include <xnnpack/log.h>
+#include <xnnpack/params.h>
+#include <xnnpack/subgraph.h>
+
+
+enum xnn_status xnn_define_squared_difference(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags)
+{
+ if (!xnn_params.initialized) {
+ xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+ xnn_node_type_to_string(xnn_node_type_squared_difference));
+ return xnn_status_uninitialized;
+ }
+
+ if (input1_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_squared_difference), input1_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input2_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_squared_difference), input2_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_squared_difference), output_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ struct xnn_node* node = xnn_subgraph_new_node(subgraph);
+ if (node == NULL) {
+ return xnn_status_out_of_memory;
+ }
+
+ node->type = xnn_node_type_squared_difference;
+ node->num_inputs = 2;
+ node->inputs[0] = input1_id;
+ node->inputs[1] = input2_id;
+ node->num_outputs = 1;
+ node->outputs[0] = output_id;
+ node->flags = flags;
+
+ return xnn_status_success;
+}
diff --git a/src/subgraph/subtract.c b/src/subgraph/subtract.c
new file mode 100644
index 000000000..817c4da90
--- /dev/null
+++ b/src/subgraph/subtract.c
@@ -0,0 +1,89 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack.h>
+#include <xnnpack/log.h>
+#include <xnnpack/params.h>
+#include <xnnpack/subgraph.h>
+
+
+enum xnn_status xnn_define_subtract(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags)
+{
+ if (!xnn_params.initialized) {
+ xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+ xnn_node_type_to_string(xnn_node_type_subtract));
+ return xnn_status_uninitialized;
+ }
+
+ if (isnan(output_min)) {
+ xnn_log_error(
+ "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+ xnn_node_type_to_string(xnn_node_type_subtract));
+ return xnn_status_invalid_parameter;
+ }
+
+ if (isnan(output_max)) {
+ xnn_log_error(
+ "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+ xnn_node_type_to_string(xnn_node_type_subtract));
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_min >= output_max) {
+ xnn_log_error(
+ "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+ xnn_node_type_to_string(xnn_node_type_subtract), output_min, output_max);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input1_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_subtract), input1_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (input2_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_subtract), input2_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ if (output_id >= subgraph->num_values) {
+ xnn_log_error(
+ "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+ xnn_node_type_to_string(xnn_node_type_subtract), output_id);
+ return xnn_status_invalid_parameter;
+ }
+
+ struct xnn_node* node = xnn_subgraph_new_node(subgraph);
+ if (node == NULL) {
+ return xnn_status_out_of_memory;
+ }
+
+ node->type = xnn_node_type_subtract;
+ node->activation.output_min = output_min;
+ node->activation.output_max = output_max;
+ node->num_inputs = 2;
+ node->inputs[0] = input1_id;
+ node->inputs[1] = input2_id;
+ node->num_outputs = 1;
+ node->outputs[0] = output_id;
+ node->flags = flags;
+
+ return xnn_status_success;
+}
diff --git a/src/xnnpack/subgraph.h b/src/xnnpack/subgraph.h
index a61ad46ef..d414e7f63 100644
--- a/src/xnnpack/subgraph.h
+++ b/src/xnnpack/subgraph.h
@@ -76,13 +76,18 @@ enum xnn_node_type {
xnn_node_type_convolution_2d,
xnn_node_type_deconvolution_2d,
xnn_node_type_depthwise_convolution_2d,
+ xnn_node_type_divide,
xnn_node_type_fully_connected,
xnn_node_type_hardswish,
- xnn_node_type_multiply2,
xnn_node_type_max_pooling_2d,
+ xnn_node_type_maximum2,
+ xnn_node_type_minimum2,
+ xnn_node_type_multiply2,
xnn_node_type_prelu,
xnn_node_type_sigmoid,
xnn_node_type_softmax,
+ xnn_node_type_squared_difference,
+ xnn_node_type_subtract,
xnn_node_type_unpooling_2d,
};