author    Lev Proleev <levp@google.com>    2019-12-19 15:18:50 +0000
committer Lev Proleev <levp@google.com>    2020-01-03 16:01:32 +0000
commit    f40dc3a495a069d70d95187c7e2eb68e22a514bd (patch)
tree      5ce61595f8fad9ead7741652c2b26f1f8ed63e5d /nn/runtime/test/specs/V1_3
parent    21d5907e55f6ed8a3e08c240efb9cf5d4a644fd8 (diff)
download  ml-f40dc3a495a069d70d95187c7e2eb68e22a514bd.tar.gz
Add quant8 signed generated tests
The tests are written semi-automatically by joining all of the 1.0-1.2 tests
with TENSOR_QUANT8_ASYMM operands and converting them to
TENSOR_QUANT8_ASYMM_SIGNED.

Also:
* Fix implementation of CONCATENATION op for zero-sized tensors
* Add support for TENSOR_QUANT8_ASYMM_SIGNED in test generator

Bug: 136735770
Test: NNTest_static and VtsHalNeuralnetworksV1_3TargetTest
Change-Id: I250dbe85684aa594892494eb53e6312c1cacb6f3
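The rewrite itself is mechanical: TENSOR_QUANT8_ASYMM stores quantized values
in [0, 255], while TENSOR_QUANT8_ASYMM_SIGNED stores them in [-128, 127] at the
same scale, so only the zero point and the stored values shift down by 128 and
the represented real values are unchanged. A minimal sketch of the idea
(hypothetical helper name, not the actual test-generator code):

    def asymm_to_signed(zero_point, values):
        # Hypothetical sketch: same scale; zero point and payload both move
        # down by 128, so real = scale * (q - zero_point) is preserved.
        return zero_point - 128, [v - 128 for v in values]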
Diffstat (limited to 'nn/runtime/test/specs/V1_3')
-rw-r--r-- nn/runtime/test/specs/V1_3/add_quant8_signed.mod.py | 96
-rw-r--r-- nn/runtime/test/specs/V1_3/argmax_quant8_signed.mod.py | 69
-rw-r--r-- nn/runtime/test/specs/V1_3/argmin_quant8_signed.mod.py | 68
-rw-r--r-- nn/runtime/test/specs/V1_3/avg_pool_quant8_signed.mod.py | 349
-rw-r--r-- nn/runtime/test/specs/V1_3/axis_aligned_bbox_transform_quant8_signed.mod.py | 122
-rw-r--r-- nn/runtime/test/specs/V1_3/batch_to_space_quant8_signed.mod.py | 68
-rw-r--r-- nn/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py | 93
-rw-r--r-- nn/runtime/test/specs/V1_3/box_with_nms_limit_quant8_signed.mod.py | 562
-rw-r--r-- nn/runtime/test/specs/V1_3/channel_shuffle_quant8_signed.mod.py | 42
-rw-r--r-- nn/runtime/test/specs/V1_3/concat_quant8_signed.mod.py | 216
-rw-r--r-- nn/runtime/test/specs/V1_3/conv2d_quant8_signed.mod.py | 661
-rw-r--r-- nn/runtime/test/specs/V1_3/depth_to_space_quant8_signed.mod.py | 127
-rw-r--r-- nn/runtime/test/specs/V1_3/depthwise_conv2d_quant8_signed.mod.py | 526
-rw-r--r-- nn/runtime/test/specs/V1_3/dequantize_quant8_signed.mod.py | 106
-rw-r--r-- nn/runtime/test/specs/V1_3/embedding_lookup_quant8_signed.mod.py | 52
-rw-r--r-- nn/runtime/test/specs/V1_3/equal_quant8_signed.mod.py | 62
-rw-r--r-- nn/runtime/test/specs/V1_3/expand_dims_quant8_signed.mod.py | 43
-rw-r--r-- nn/runtime/test/specs/V1_3/fully_connected_quant8_signed.mod.py | 187
-rw-r--r-- nn/runtime/test/specs/V1_3/gather_quant8_signed.mod.py | 140
-rw-r--r-- nn/runtime/test/specs/V1_3/generate_proposals_quant8_signed.mod.py | 211
-rw-r--r-- nn/runtime/test/specs/V1_3/greater_equal_quant8_signed.mod.py | 62
-rw-r--r-- nn/runtime/test/specs/V1_3/greater_quant8_signed.mod.py | 62
-rw-r--r-- nn/runtime/test/specs/V1_3/grouped_conv2d_quant8_signed.mod.py | 135
-rw-r--r-- nn/runtime/test/specs/V1_3/heatmap_max_keypoint_quant8_signed.mod.py | 130
-rw-r--r-- nn/runtime/test/specs/V1_3/l2_normalization_quant8_signed.mod.py | 81
-rw-r--r-- nn/runtime/test/specs/V1_3/less_equal_quant8_signed.mod.py | 62
-rw-r--r-- nn/runtime/test/specs/V1_3/less_quant8_signed.mod.py | 62
-rw-r--r-- nn/runtime/test/specs/V1_3/logistic_quant8_signed.mod.py | 109
-rw-r--r-- nn/runtime/test/specs/V1_3/max_pool_quant8_signed.mod.py | 301
-rw-r--r-- nn/runtime/test/specs/V1_3/maximum_quant8_signed.mod.py | 64
-rw-r--r-- nn/runtime/test/specs/V1_3/mean_quant8_signed.mod.py | 60
-rw-r--r-- nn/runtime/test/specs/V1_3/minimum_quant8_signed.mod.py | 63
-rw-r--r-- nn/runtime/test/specs/V1_3/mul_quant8_signed.mod.py | 96
-rw-r--r-- nn/runtime/test/specs/V1_3/not_equal_quant8_signed.mod.py | 62
-rw-r--r-- nn/runtime/test/specs/V1_3/pad_quant8_signed.mod.py | 160
-rw-r--r-- nn/runtime/test/specs/V1_3/prelu_quant8_signed.mod.py | 60
-rw-r--r-- nn/runtime/test/specs/V1_3/quantize_quant8_signed.mod.py | 68
-rw-r--r-- nn/runtime/test/specs/V1_3/reduce_max_quant8_signed.mod.py | 70
-rw-r--r-- nn/runtime/test/specs/V1_3/reduce_min_quant8_signed.mod.py | 70
-rw-r--r-- nn/runtime/test/specs/V1_3/relu1_quant8_signed.mod.py | 104
-rw-r--r-- nn/runtime/test/specs/V1_3/relu6_quant8_signed.mod.py | 103
-rw-r--r-- nn/runtime/test/specs/V1_3/relu_quant8_signed.mod.py | 108
-rw-r--r-- nn/runtime/test/specs/V1_3/reshape_quant8_signed.mod.py | 34
-rw-r--r-- nn/runtime/test/specs/V1_3/resize_quant8_signed.mod.py | 412
-rw-r--r-- nn/runtime/test/specs/V1_3/roi_align_quant8_signed.mod.py | 264
-rw-r--r-- nn/runtime/test/specs/V1_3/roi_pooling_quant8_signed.mod.py | 152
-rw-r--r-- nn/runtime/test/specs/V1_3/select_quant8_signed.mod.py | 64
-rw-r--r-- nn/runtime/test/specs/V1_3/slice_quant8_signed.mod.py | 80
-rw-r--r-- nn/runtime/test/specs/V1_3/softmax_quant8_signed.mod.py | 136
-rw-r--r-- nn/runtime/test/specs/V1_3/space_to_batch_quant8_signed.mod.py | 212
-rw-r--r-- nn/runtime/test/specs/V1_3/space_to_depth_quant8_signed.mod.py | 124
-rw-r--r-- nn/runtime/test/specs/V1_3/split_quant8_signed.mod.py | 103
-rw-r--r-- nn/runtime/test/specs/V1_3/squeeze_quant8_signed.mod.py | 44
-rw-r--r-- nn/runtime/test/specs/V1_3/strided_slice_quant8_signed.mod.py | 288
-rw-r--r-- nn/runtime/test/specs/V1_3/sub_quant8_signed.mod.py | 145
-rw-r--r-- nn/runtime/test/specs/V1_3/tanh_quant8_signed.mod.py | 75
-rw-r--r-- nn/runtime/test/specs/V1_3/tile_quant8_signed.mod.py | 90
-rw-r--r-- nn/runtime/test/specs/V1_3/topk_v2_quant8_signed.mod.py | 32
-rw-r--r-- nn/runtime/test/specs/V1_3/transpose_conv2d_quant8_signed.mod.py | 317
-rw-r--r-- nn/runtime/test/specs/V1_3/transpose_quant8_signed.mod.py | 415
60 files changed, 8879 insertions, 0 deletions
diff --git a/nn/runtime/test/specs/V1_3/add_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/add_quant8_signed.mod.py
new file mode 100644
index 000000000..9fb33eabd
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/add_quant8_signed.mod.py
@@ -0,0 +1,96 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 2.0, 0")
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, 0")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, 0")
+model = model.Operation("ADD", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2],
+ i2: # input 1
+ [3, 4]}
+
+output0 = {i3: # output 0
+ [5, 8]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2}, 2.0, 0")
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, 0")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, 0")
+model = model.Operation("ADD", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2],
+ i2: # input 1
+ [1, 2, 3, 4]}
+
+output0 = {i3: # output 0
+ [3, 6, 5, 8]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+# Zero-sized input test
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# ADD op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("ADD", zero_sized, i2, 0).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
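+# The DataTypeConverter above retypes the float32 reference graph into the
+# listed quantized types; includeDefault=False below emits only the
+# quant8_signed variant and skips the float32 one.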
+
+Example({
+ i1: [1, 2],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/argmax_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/argmax_quant8_signed.mod.py
new file mode 100644
index 000000000..92f3e69bd
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/argmax_quant8_signed.mod.py
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMAX", input0, axis).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [1, 0],
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 0)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMAX", input0, axis).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [1, 1],
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# Negative axis support test.
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", -1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMAX", input0, axis).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [1, 0],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/argmin_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/argmin_quant8_signed.mod.py
new file mode 100644
index 000000000..f99c67884
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/argmin_quant8_signed.mod.py
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Negative axis support test.
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", -1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMIN", input0, axis).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [0, 1],
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 0)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMIN", input0, axis).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [0, 0],
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMIN", input0, axis).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [0, 1],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/avg_pool_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/avg_pool_quant8_signed.mod.py
new file mode 100644
index 000000000..1c3b3f2ff
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/avg_pool_quant8_signed.mod.py
@@ -0,0 +1,349 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
+cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+o = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
+model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(o)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {o: # output 0
+ [-127, -126, -125, -124]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+bat = 5
+row = 52
+col = 60
+chn = 3
+
+i0 = Input("i0", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 0.5f, -128" % (bat, row, col, chn))
+
+std = 5
+flt = 10
+pad = 5
+
+stride = Int32Scalar("stride", std)
+filt = Int32Scalar("filter", flt)
+padding = Int32Scalar("padding", pad)
+act0 = Int32Scalar("activation", 0)
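+# Output spatial size for explicit symmetric padding:
+# out = (in + 2 * pad - filter + stride) // stride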
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d, %d, %d, %d}, 0.5f, -128" % (bat, output_row, output_col, chn))
+
+model = model.Operation(
+ "AVERAGE_POOL_2D", i0, padding, padding, padding, padding, stride, stride, filt, filt, act0).To(output)
+
+# Example 1. Input in operand 0,
+input_values = [127 for _ in range(bat * row * col * chn)]
+input0 = {i0: input_values}
+output_values = [127 for _ in range(bat * output_row * output_col * chn)]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+bat = 1
+row = 100
+col = 100
+chn = 1
+
+i0 = Input("i0", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 0.5f, -128" % (bat, row, col, chn))
+
+std = 4
+flt = 10
+pad = 0
+
+stride = Int32Scalar("stride", std)
+filt = Int32Scalar("filter", flt)
+padding = Int32Scalar("padding", pad)
+act0 = Int32Scalar("activation", 0)
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d, %d, %d, %d}, 0.5f, -128" % (bat, output_row, output_col, chn))
+
+model = model.Operation(
+ "AVERAGE_POOL_2D", i0, padding, padding, padding, padding, stride, stride, filt, filt, act0).To(output)
+
+# Example 1. Input in operand 0,
+input_values = [x % 4 * 2 - 128 for x in range(bat * row * col * chn)]
+input0 = {i0: input_values}
+output_values = [-125 for _ in range(bat * output_row * output_col * chn)]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 1}, 0.5f, -128")
+cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
+act2 = Int32Scalar("relu1_activation", 2)
+o = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 1}, 0.5f, -128")
+model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act2).To(o)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -127, -126, -125, -124, -123, -122, -121, -120]}
+
+output0 = {o: # output 0
+ [-128, -127, -126, -126, -126, -126, -126, -126, -126]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 4, 1}, 0.0625f, -128") # input 0
+cons2 = Int32Scalar("cons2", 2)
+pad_same = Int32Scalar("pad_same", 1)
+act_none = Int32Scalar("act_none", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 2, 1}, 0.0625f, -128") # output 0
+model = model.Operation("AVERAGE_POOL_2D", i1, pad_same, cons2, cons2, cons2, cons2, act_none).To(i3)
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -32, -96, -64, -80, -96, 32, -16]}
+output0 = {i3: # output 0
+ [-84, -36]}
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: AVERAGE_POOL_2D_NCHW_1, pad = 0, stride = 1, filter = 1, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("AVERAGE_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 2.0, 3.0, 4.0],
+ o1: [1.0, 2.0, 3.0, 4.0]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 2: AVERAGE_POOL_2D_NCHW_2, act = none
+bat = 5
+row = 52
+col = 60
+chn = 3
+std = 5
+flt = 100
+pad = 50
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i2 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o2 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("AVERAGE_POOL_2D", i2, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1. for _ in range(bat * row * col * chn)],
+ o2: [1. for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 3: AVERAGE_POOL_2D_NCHW_3, act = none
+bat = 1
+row = 200
+col = 180
+chn = 1
+std = 2
+flt = 10
+pad = 0
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i3 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o3 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("AVERAGE_POOL_2D", i3, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [x % 2 for x in range(bat * row * col * chn)],
+ o3: [.5 for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 4: AVERAGE_POOL_2D_NCHW_4, act = relu6
+bat = 5
+row = 52
+col = 60
+chn = 3
+std = 5
+flt = 100
+pad = 50
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i4 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o4 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("AVERAGE_POOL_2D", i4, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [10 for _ in range(bat * row * col * chn)],
+ o4: [6 for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 5: AVERAGE_POOL_2D_NCHW_5, pad = same, stride = 2, filter = 2, act = none
+i5 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
+o5 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")
+Model().Operation("AVERAGE_POOL_2D", i5, 1, 2, 2, 2, 2, 0, layout).To(o5)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ o5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i5: [0, 6, 2, 4, 3, 2, 10, 7],
+ o5: [2.75, 5.75]
+}).AddNchw(i5, o5, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 6: zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# AVERAGE_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("AVERAGE_POOL_2D", zero_sized, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 7: zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# AVERAGE_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("AVERAGE_POOL_2D", zero_sized, 1, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/axis_aligned_bbox_transform_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/axis_aligned_bbox_transform_quant8_signed.mod.py
new file mode 100644
index 000000000..17ca33ea5
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/axis_aligned_bbox_transform_quant8_signed.mod.py
@@ -0,0 +1,122 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: AXIS_ALIGNED_BBOX_TRANSFORM
+r1 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+d1 = Input("bboxDeltas", "TENSOR_FLOAT32", "{5, 8}")
+b1 = Input("batchSplit", "TENSOR_INT32", "{5}")
+i1 = Input("imageInfo", "TENSOR_FLOAT32", "{4, 2}")
+o1 = Output("out", "TENSOR_FLOAT32", "{5, 8}")
+model1 = Model().Operation("AXIS_ALIGNED_BBOX_TRANSFORM", r1, d1, b1, i1).To(o1)
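+# AXIS_ALIGNED_BBOX_TRANSFORM decodes per-class deltas (dx, dy, dw, dh) against
+# each roi: centers shift by dx*w and dy*h, sizes scale by exp(dw) and exp(dh),
+# and the result is clipped to the corresponding imageInfo bounds.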
+
+quant8_signed = DataTypeConverter().Identify({
+ r1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ d1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.05, 0),
+ i1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+inputs = {
+ r1: [100, 150, 400, 430,
+ 120, 60, 122, 61,
+ 10, 20, 20, 50,
+ 50, 120, 150, 250,
+ 400, 100, 1000, 2000],
+ d1: [0.2, 0.2, 0.1, 0.1,
+ 0.3, -0.1, -0.2, 0.1,
+ -0.5, 0.2, 0.2, -0.5,
+ -0.1, -0.1, 2.5, 3,
+ -0.5, -0.5, 1, 1,
+ 0.5, 0.5, -1.5, -1.2,
+ 0.2, 0.2, -3, -4,
+ 1, -0.5, 0.3, 0.5,
+ 0.3, -0.2, 1.1, -0.8,
+ 0.1, 0.05, -0.5, -0.5],
+ b1: [0, 1, 2, 2, 3],
+ i1: [512, 512,
+ 128, 256,
+ 256, 256,
+ 1024, 512]
+}
+
+Example((inputs, {
+ o1: [144.224350, 191.276062, 475.775635, 500.723938,
+ 217.190384, 107.276062, 462.809631, 416.723938,
+ 118.778594, 60.396736, 121.221406, 61.003266,
+ 108.617508, 50.357232, 132.982498, 70.442772,
+ 0.000000, 0.000000, 23.59140714, 60.77422571,
+ 18.88435 , 45.48208571, 21.11565 , 54.51791429,
+ 117.51063714, 209.80948286, 122.48935143, 212.19050857,
+ 132.50705143, 12.83312286, 255.99999571, 227.16685714,
+ 0. , 243.1374815, 512. , 1024. ,
+ 512. , 568.7958375, 512. , 1024. ]
+}), model=model1).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 2: AXIS_ALIGNED_BBOX_TRANSFORM_ZERO_BATCH
+r2 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+d2 = Input("bboxDeltas", "TENSOR_FLOAT32", "{5, 8}")
+b2 = Input("batchSplit", "TENSOR_INT32", "{5}")
+i2 = Input("imageInfo", "TENSOR_FLOAT32", "{7, 2}")
+o2 = Output("out", "TENSOR_FLOAT32", "{5, 8}")
+model2 = Model().Operation("AXIS_ALIGNED_BBOX_TRANSFORM", r2, d2, b2, i2).To(o2)
+
+quant8_signed = DataTypeConverter().Identify({
+ r2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ d2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.05, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+inputs = {
+ r2: [100, 150, 400, 430,
+ 120, 60, 122, 61,
+ 10, 20, 20, 50,
+ 50, 120, 150, 250,
+ 400, 100, 1000, 2000],
+ d2: [0.2, 0.2, 0.1, 0.1,
+ 0.3, -0.1, -0.2, 0.1,
+ -0.5, 0.2, 0.2, -0.5,
+ -0.1, -0.1, 2.5, 3,
+ -0.5, -0.5, 1, 1,
+ 0.5, 0.5, -1.5, -1.2,
+ 0.2, 0.2, -3, -4,
+ 1, -0.5, 0.3, 0.5,
+ 0.3, -0.2, 1.1, -0.8,
+ 0.1, 0.05, -0.5, -0.5],
+ b2: [0, 2, 5, 5, 6],
+ i2: [512, 512,
+ 32, 32,
+ 128, 256,
+ 32, 32,
+ 32, 32,
+ 256, 256,
+ 1024, 512]
+}
+
+Example((inputs, {
+ o2: [144.224350, 191.276062, 475.775635, 500.723938,
+ 217.190384, 107.276062, 462.809631, 416.723938,
+ 118.778594, 60.396736, 121.221406, 61.003266,
+ 108.617508, 50.357232, 132.982498, 70.442772,
+ 0.000000, 0.000000, 23.59140714, 60.77422571,
+ 18.88435 , 45.48208571, 21.11565 , 54.51791429,
+ 117.51063714, 209.80948286, 122.48935143, 212.19050857,
+ 132.50705143, 12.83312286, 255.99999571, 227.16685714,
+ 0. , 243.1374815, 512. , 1024. ,
+ 512. , 568.7958375, 512. , 1024. ]
+}), model=model2).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/batch_to_space_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/batch_to_space_quant8_signed.mod.py
new file mode 100644
index 000000000..21c8dff15
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/batch_to_space_quant8_signed.mod.py
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 2, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 4, 1}, 1.0, 0")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+ [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: BATCH_TO_SPACE_NCHW_1, block_size = [2, 2]
+i1 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+Model().Operation("BATCH_TO_SPACE_ND", i1, [2, 2], layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# TEST 2: BATCH_TO_SPACE_NCHW_2, block_size = [2, 2]
+i2 = Input("op1", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+Model().Operation("BATCH_TO_SPACE_ND", i2, [2, 2], layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ o2: [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py
new file mode 100644
index 000000000..6afab65c7
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py
@@ -0,0 +1,93 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# Operation 1, GENERATE_PROPOSALS
+scores = Input("scores", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+deltas = Input("deltas", "TENSOR_FLOAT32", "{1, 1, 1, 4}")
+anchors = Input("anchors", "TENSOR_FLOAT32", "{1, 4}")
+image = Input("imageInfo", "TENSOR_FLOAT32", "{1, 2}")
+scoresOut_1 = Output("scores", "TENSOR_FLOAT32", "{0}")
+roiOut_1 = Internal("roi", "TENSOR_FLOAT32", "{0, 4}")
+batchOut_1 = Internal("batches", "TENSOR_INT32", "{0}")
+model = Model("zero_sized").Operation("GENERATE_PROPOSALS", scores, deltas, anchors, image, 1.0, 1.0, -1, -1, 0.3, 10.0, layout).To(scoresOut_1, roiOut_1, batchOut_1)
+
+# Operation 2, ROI_ALIGN
+feature = Input("featureMap", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+featureOut_2 = Internal("scores", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", feature, roiOut_1, batchOut_1, 2, 2, 1.0, 1.0, 4, 4, layout).To(featureOut_2)
+
+# Operation 3, FULLY_CONNECTED
+weights_3 = Parameter("weights", "TENSOR_FLOAT32", "{8, 4}", [1] * 32)
+bias_3 = Parameter("bias", "TENSOR_FLOAT32", "{8}", [1] * 8)
+deltaOut_3 = Internal("delta", "TENSOR_FLOAT32", "{0, 8}")
+model = model.Operation("FULLY_CONNECTED", featureOut_2, weights_3, bias_3, 0).To(deltaOut_3)
+
+# Operation 4, FULLY_CONNECTED
+weights_4 = Parameter("weights", "TENSOR_FLOAT32", "{2, 4}", [1] * 8)
+bias_4 = Parameter("bias", "TENSOR_FLOAT32", "{2}", [1] * 2)
+scoresOut_4 = Internal("scores", "TENSOR_FLOAT32", "{0, 2}")
+model = model.Operation("FULLY_CONNECTED", featureOut_2, weights_4, bias_4, 0).To(scoresOut_4)
+
+# Operation 5, AXIS_ALIGNED_BBOX_TRANSFORM
+roiOut_5 = Internal("roi", "TENSOR_FLOAT32", "{0, 8}")
+model = model.Operation("AXIS_ALIGNED_BBOX_TRANSFORM", roiOut_1, deltaOut_3, batchOut_1, image).To(roiOut_5)
+
+# Operation 6, BOX_WITH_NMS_LIMIT
+scoresOut_6 = Output("scores", "TENSOR_FLOAT32", "{0}")
+roiOut_6 = Output("roi", "TENSOR_FLOAT32", "{0, 4}")
+classOut_6 = Output("classes", "TENSOR_INT32", "{0}")
+batchOut_6 = Output("batches", "TENSOR_INT32", "{0}")
+model = model.Operation("BOX_WITH_NMS_LIMIT", scoresOut_4, roiOut_5, batchOut_1, 0.1, -1, 0, 0.3, 1.0, 0.1).To(scoresOut_6, roiOut_6, classOut_6, batchOut_6)
+
+quant8_signed = DataTypeConverter().Identify({
+ scores: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ deltas: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ anchors: ("TENSOR_QUANT16_SYMM", 0.125, 0),
+ image: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ scoresOut_1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ roiOut_1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ feature: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ featureOut_2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ weights_3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ bias_3: ("TENSOR_INT32", 0.01, 0),
+ deltaOut_3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ weights_4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ bias_4: ("TENSOR_INT32", 0.01, 0),
+ scoresOut_4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ roiOut_5: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ scoresOut_6: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ roiOut_6: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+})
+
+Example({
+
+ # Inputs that will lead to zero-sized output of GENERATE_PROPOSALS
+ scores: [0.5],
+ deltas: [0, 0, -10, -10],
+ anchors: [0, 0, 10, 10],
+ image: [32, 32],
+ feature: [1],
+
+ # Dummy outputs
+ scoresOut_1: [],
+ scoresOut_6: [],
+ roiOut_6: [],
+ classOut_6: [],
+ batchOut_6: [],
+
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/box_with_nms_limit_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/box_with_nms_limit_quant8_signed.mod.py
new file mode 100644
index 000000000..594799c00
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/box_with_nms_limit_quant8_signed.mod.py
@@ -0,0 +1,562 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# BOX_WITH_NMS_LIMIT, score_threshold = 0.3, sigma = 0.5, max_detections = -1
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{18}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{18, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{18}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{18}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 2, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.7879927, 0.52485234, 0.47400165, 0.95, 0.6894936, 0.4812244, 0.42367333,
+ 0.95, 0.89983034, 0.7879927, 0.52485234, 0.47400165, 0.95, 0.8, 0.6894936, 0.4811337, 0.42367333
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 6, 6, 16, 16,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 7, 7, 17, 17,
+ 3, 3, 13, 13,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19,
+ 1, 1, 11, 11,
+ 5, 5, 15, 15
+ ],
+ o3: [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
+ o4: [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+}
+
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{10, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{10}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{10}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 2, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.7879927, 0.52485234, 0.95, 0.6894936,
+ 0.95, 0.89983034, 0.7879927, 0.95, 0.8
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 6, 6, 16, 16,
+ 2, 2, 12, 12,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 7, 7, 17, 17,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 2, 2],
+ o4: [1, 1, 1, 1, 1, 3, 3, 3, 3, 3],
+}
+
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = -1
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{12}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{12, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{12}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{12}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o3, o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
+}
+
+output0 = {
+ o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2],
+ o4: [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
+}
+
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{10, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{10}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{10}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 0, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
+}
+
+output0 = {
+ o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.95, 0.8],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 2, 2],
+ o4: [1, 1, 1, 1, 1, 3, 3, 3, 3, 3],
+}
+
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = -1
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{16}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{16, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{16}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{16}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 1, 0.4, 1.0, 0.3).To(o1, o2, o3, o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.85, 0.75, 0.95, 0.7, 0.42352945, 0.39705884,
+ 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7, 0.42352945, 0.39705884
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 4, 4, 14, 14,
+ 0, 0, 10, 10,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19,
+ 5, 5, 15, 15,
+ 1, 1, 11, 11
+ ],
+ o3: [1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2],
+ o4: [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+}
+
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{15}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{15, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{15}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{15}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 8, 1, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.85, 0.75, 0.95, 0.7, 0.42352945, 0.39705884,
+ 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7, 0.42352945
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 4, 4, 14, 14,
+ 0, 0, 10, 10,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19,
+ 5, 5, 15, 15
+ ],
+ o3: [1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2],
+ o4: [1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3],
+}
+
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/channel_shuffle_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/channel_shuffle_quant8_signed.mod.py
new file mode 100644
index 000000000..043711d58
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/channel_shuffle_quant8_signed.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 3, 12}") # input 0
+o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 3, 12}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+Model().Operation("CHANNEL_SHUFFLE", i1, 3, axis).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0)
+})
+
+Example({
+ i1: list(range(2*2*3*12)),
+ o1: [ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
+ 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23,
+ 24, 28, 32, 25, 29, 33, 26, 30, 34, 27, 31, 35,
+ 36, 40, 44, 37, 41, 45, 38, 42, 46, 39, 43, 47,
+ 48, 52, 56, 49, 53, 57, 50, 54, 58, 51, 55, 59,
+ 60, 64, 68, 61, 65, 69, 62, 66, 70, 63, 67, 71,
+ 72, 76, 80, 73, 77, 81, 74, 78, 82, 75, 79, 83,
+ 84, 88, 92, 85, 89, 93, 86, 90, 94, 87, 91, 95,
+ 96, 100, 104, 97, 101, 105, 98, 102, 106, 99, 103, 107,
+ 108, 112, 116, 109, 113, 117, 110, 114, 118, 111, 115, 119,
+ 120, 124, 128, 121, 125, 129, 122, 126, 130, 123, 127, 131,
+ 132, 136, 140, 133, 137, 141, 134, 138, 142, 135, 139, 143]
+}).AddVariations(quant8_signed, includeDefault=False).AddAllDimsAndAxis(i1, o1, axis)
diff --git a/nn/runtime/test/specs/V1_3/concat_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/concat_quant8_signed.mod.py
new file mode 100644
index 000000000..d311b43d2
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/concat_quant8_signed.mod.py
@@ -0,0 +1,216 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Adapted from tensorflow/lite/kernels/concatenation_test.cc
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 1, 2}")
+input1 = Input("input1", "TENSOR_FLOAT32", "{2, 1, 2}")
+input2 = Input("input2", "TENSOR_FLOAT32", "{2, 1, 2}")
+input3 = Input("input3", "TENSOR_FLOAT32", "{2, 1, 2}")
+axis = 2
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 1, 8}")
+
+model = Model().Operation("CONCATENATION", input0, input1, input2, input3, axis).To(output0)
+
+# FourInputsQuantizedMixedRange
+Example({
+ input0: [1.0, -3.0, -4.0, -7.0],
+ input1: [1.1, 3.1, 4.1, 7.1],
+ input2: [1.2, -3.2, -4.2, 7.2],
+ input3: [1.3, 3.3, 4.3, 7.3],
+ output0: [1.0, -3.0, 1.1, 3.1, 1.2, -3.2, 1.3, 3.3, -4.0, -7.0, 4.1, 7.1, -4.2, 7.2, 4.3, 7.3],
+}).AddVariations(DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.084, -1],
+ input1: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.05, -128],
+ input2: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.089, -5],
+ input3: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.029, -128],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -1],
+}), includeDefault=False)
+
+# FourInputsQuantizedMixedRangeClampingLogic
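+# With output scale 0.0078125 (= 1/128) and zero point -1, the output can
+# only represent (-128 + 1) / 128 = -0.992 .. (127 + 1) / 128 = 1.0, so
+# every concatenated value saturates to the edges of that range.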
+Example({
+ input0: [1.0, -3.0, -4.0, -7.0],
+ input1: [1.1, 3.1, 4.1, 7.1],
+ input2: [1.2, -3.2, -4.2, 7.2],
+ input3: [1.3, 3.3, 4.3, 7.3],
+ output0: [1.0, -1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0]
+}).AddVariations(DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.084, -1],
+ input1: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.05, -128],
+ input2: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.089, -5],
+ input3: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.029, -128],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.0078125, -1],
+}), includeDefault=False)
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 0.5f, -128") # input 0
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 0.5f, -128") # input 1
+axis1 = Int32Scalar("axis1", 1)
+r = Output("result", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 6}, 0.5f, -128") # output
+model = model.Operation("CONCATENATION", i1, i2, axis1).To(r)
+
+# Example 1.
+input0 = {i1: [1, 2, 3, 4, 5, 6],
+ i2: [7, 8, 9, 10, 11, 12]}
+output0 = {r: [1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+row1 = 52
+row2 = 40
+col = 300
+output_row = row1 + row2
+
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}, 0.5f, -128" % (row1, col))
+input2 = Input("input2", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}, 0.5f, -128" % (row2, col))
+axis0 = Int32Scalar("axis0", 0)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}, 0.5f, -128" % (output_row, col))
+model = model.Operation("CONCATENATION", input1, input2, axis0).To(output)
+
+# Example 1.
+# Build one continuous sequence that wraps modulo 256; input2 continues
+# where input1 leaves off, and both are shifted into signed range below.
+input1_values = [x % 256 for x in range(row1 * col)]
+input2_values = [(x + row1 * col) % 256 for x in range(row2 * col)]
+input0 = {input1: [x - 128 for x in input1_values],
+ input2: [x - 128 for x in input2_values]}
+output_values = [x % 256 - 128 for x in range(output_row * col)]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+row = 400
+col1 = 60
+col2 = 30
+output_col = col1 + col2
+
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}, 0.5f, -128" % (row, col1))
+input2 = Input("input2", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}, 0.5f, -128" % (row, col2))
+axis1 = Int32Scalar("axis1", 1)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}, 0.5f, -128" % (row, output_col))
+model = model.Operation("CONCATENATION", input1, input2, axis1).To(output)
+
+# Example 1.
+input1_values = [(x % 128) for x in range(row * col1)]
+input2_values = [x % 128 - 128 for x in range(row * col2)]
+input0 = {input1: input1_values,
+ input2: input2_values}
+
+output_values = [x for x in range(row * output_col)]
+for r in range(row):
+ for c1 in range(col1):
+ output_values[r * output_col + c1] = input1_values[r * col1 + c1]
+ for c2 in range(col2):
+ output_values[r * output_col + col1 + c2] = input2_values[r * col2 + c2]
+
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+# Zero-sized input: zero dimension is not "axis"
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONCATENATION op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("CONCATENATION", zero_sized, zero_sized, 3).To(o3)
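+# Concatenating two {0, 2, 2, 1} tensors along axis 3 keeps the zero batch
+# dimension and doubles the channel count, hence the {0, 2, 2, 2} output.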
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# Zero-sized input: zero dimension is "axis"
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONCATENATION op with numBatches = 0.
+i2 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o3 = Output("out", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # out
+model = model.Operation("CONCATENATION", zero_sized, i2, 0).To(o3)
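+# Along axis 0 the zero-sized tensor contributes no batches, so the output
+# is exactly the second input, as the example below verifies.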
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.2, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ i2: [1, 2, 3, 4],
+ o1: [],
+ o2: [],
+ o3: [1, 2, 3, 4],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/conv2d_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/conv2d_quant8_signed.mod.py
new file mode 100644
index 000000000..d9ca49a13
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/conv2d_quant8_signed.mod.py
@@ -0,0 +1,661 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# dilation set to 1 (default)
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
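+# Note: the bias scale 0.0625 is the product of the input and filter scales
+# (0.5 * 0.125), as quantized CONV_2D requires for its INT32 bias.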
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ o1: [.875, .875, .875, .875]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+
+# dilation set to 3
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 9, 9, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 2, 3, 4, 5, 6, 7, 8, 9])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 0, layout, 3, 3).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ o2: [5, 5, 5, 5, 5, 5, 5, 5, 5]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+# same as test 1 but with implicit VALID padding
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("CONV_2D", i1, f1, b1, 2, 1, 1, 0, layout, 1, 1).To(o1)
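+# (The scalar 2 selects the implicit VALID padding scheme; 1 would select SAME.)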
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ o1: [.875, .875, .875, .875]
+}, name="valid_padding").AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+
+# same as test 2 but with implicit VALID padding
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 9, 9, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 2, 3, 4, 5, 6, 7, 8, 9])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("CONV_2D", i2, f2, b2, 2, 1, 1, 0, layout, 3, 3).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ o2: [5, 5, 5, 5, 5, 5, 5, 5, 5]
+}, name="valid_padding").AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+
+# dilation set to 3, SAME padding
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 6, 6, 1}")
+f3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4])
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("CONV_2D", i3, f3, b3, 1, 2, 2, 0, layout, 3, 3).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b3: ("TENSOR_INT32", 0.0625, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 4, 3, 0, 0,
+ 0, 0, 2, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0],
+ o3: [16, 0, 9, 0, 0, 0, 4, 0, 1]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+# No layout param specified
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 1, 2}, 0.5f, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{3, 1, 1, 2}",
+ [1, 2, 1, 2, 1, 2], extraParams = SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.75, 1.0]))
+b1 = Parameter("op3", "TENSOR_INT32", "{3}", [4, 4, 4])
+o1 = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 1, 3}, 1.f, 0")
+Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0).To(o1)
+
+# Instantiate an example
+Example({
+ i1: [10, 10, 10, 10, 10, 10],
+ o1: [9, 13, 17, 9, 13, 17, 9, 13, 17]
+})
+
+# layout param, NHWC/NCHW layouts
+layout = BoolScalar("layout", False) # NHWC
+i2 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 1, 2}, 0.5f, 0")
+f2 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{3, 1, 1, 2}",
+ [1, 2, 1, 2, 1, 2], extraParams = SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.75, 1.0]))
+b2 = Parameter("op3", "TENSOR_INT32", "{3}", [4, 4, 4])
+o2 = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 1, 3}, 1.f, 0")
+Model("layouts").Operation("CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 0, layout).To(o2)
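+# A worked check of the first output entry below: the input pair (10, -20)
+# dequantizes to (5, -10); channel 0 of the filter dequantizes to
+# (0.5, 1.0) with bias 4 * (0.5 * 0.5) = 1.0, so the accumulation is
+# 5 * 0.5 - 10 * 1.0 + 1.0 = -6.5, which requantizes to the expected -7.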
+
+# Instantiate an example
+Example({
+ i2: [10, -20, 10, -20, 10, -20],
+ o2: [-7, -10, -13, -7, -10, -13, -7, -10, -13]
+}).AddNchw(i2, o2, layout)
+
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2}, 0.1f, 0", [9, 1]) # scores
+p2 = Parameter("roi", "TENSOR_QUANT16_ASYMM", "{1, 8}, 0.125f, 0", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_QUANT8_ASYMM_SIGNED", "{0}, 0.1f, 0") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_QUANT16_ASYMM", "{0, 4}, 0.125f, 0") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 2}, 0.5f, 0")
+zero_sized = Internal("featureMap", "TENSOR_QUANT8_ASYMM_SIGNED", "{0, 2, 2, 2}, 0.5f, 0")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{3, 1, 1, 2}",
+ [1, 2, 1, 2, 1, 2], extraParams = SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.75, 1.0]))
+b = Parameter("bias", "TENSOR_INT32", "{3}", [4, 4, 4])
+o3 = Output("out", "TENSOR_QUANT8_ASYMM_SIGNED", "{0, 2, 2, 3}, 1.f, 0") # out
+model = model.Operation("CONV_2D", zero_sized, w, b, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
+
+Example({
+ i1: [2, 2],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout)
+
+layout = BoolScalar("layout", False) # NHWC
+
+# CONV_NCHW_1
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ o1: [.875, .875, .875, .875]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, channelquant8_signed, includeDefault=False)
+
+
+# CONV_NCHW_2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 4, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 4, 7, 2, 5, 8, 3, 6, 9])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 4, 1}")
+Model().Operation("CONV_2D", i2, f2, b2, 1, 1, 1, 1, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1),
+ b2: ("TENSOR_INT32", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -78)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1),
+ f2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -78)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+ o2: [0, 0, 0, 0, 35, 112, 157, 0, 0, 34, 61, 0]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, channelquant8_signed, includeDefault=False)
+
+
+# CONV_NCHW_CHANNEL
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+f3 = Parameter("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}", [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5])
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+Model("channel").Operation("CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ b3: ("TENSOR_INT32", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f3: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.4, 0.3])),
+ b3: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.2, 0.15], hide=True)),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [5., 5., 5.],
+ o3: [15., 37.5, 60.]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, channelquant8_signed, includeDefault=False)
+
+
+# CONV_NCHW_LARGE
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
+f4 = Parameter("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}", [1., 4., 7., 2., 5., 8., 3., 6., 9.])
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
+Model("large").Operation("CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b4: ("TENSOR_INT32", 0.25, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -128)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f4: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 0.5])),
+ b4: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5, 0.25], hide=True)),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -128)
+})
+channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -1),
+ f4: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 1.005])),
+ b4: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 1.005], hide=True)),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -1)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18.],
+ o4: [30., 36., 42.,
+ 66., 81., 96.,
+ 102., 126., 150.,
+ 138., 171., 204.,
+ 174., 216., 258.,
+ 210., 261., 312.]
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, channelquant8_signed, channelQuant8_mult_gt_1, includeDefault=False)
+
+# quantized with scale product greater than output scale
+scale = 256.5 / 255
+zero_point = 0
+i9 = Input("op1", ("TENSOR_QUANT8_ASYMM_SIGNED", [2, 2, 4, 1], scale, zero_point))
+f9 = Parameter("op2", ("TENSOR_QUANT8_ASYMM_SIGNED", [3, 2, 2, 1], scale, zero_point),
+ [1, 2, 3, 4, -1, 1, -1, 1, -1, -1, 1, 1])
+b9 = Parameter("op3", ("TENSOR_INT32", [3], scale * scale, 0), [1, 2, 3])
+o9 = Output("op4", ("TENSOR_QUANT8_ASYMM_SIGNED", [2, 1, 2, 3], 1.0, -1))
+model9 = Model("quant_output_multiplier_gt_1").Operation("CONV_2D", i9, f9, b9, 2, 2, 2, 0).To(o9)
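+# Here input scale * filter scale = (256.5 / 255)^2, roughly 1.012, which
+# exceeds the output scale of 1.0, so the requantization multiplier is
+# greater than 1: exactly the case this test exercises.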
+
+# Instantiate an example
+example = Example({
+ i9: [
+ 1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 3, 4, 1, 2,
+ 3, 4
+ ],
+ o9: [17, 1, 4, 17, 1, 4, 16, 3, 2, 36, 3, 2]
+}, model=model9)
+
+
+# zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 1, 1, 1}", [3, 4]) # weights
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [1, 2]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("CONV_2D", zero_sized, w, b, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+
+# zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 1, 1, 1}", [3, 4]) # weights
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [1, 2]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("CONV_2D", zero_sized, w, b, 1, 1, 1, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 6, 1}, 0.5f, -1")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -1",
+ [1, 3, 5, 7])
+b1 = Parameter("op3", "TENSOR_INT32", "{1}, 0.25f, 0", [-4])
+pad_valid = Int32Scalar("pad_valid", 2)
+act_none = Int32Scalar("act_none", 0)
+stride1 = Int32Scalar("stride1", 1)
+stride3 = Int32Scalar("stride3", 3)
+
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, -1")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad_valid, stride3,
+ stride1, act_none).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [5, 3, 1, -3, -5, -7,
+ 7, 5, 3, -5, -7, -9,
+ 9, 7, 5, -7, -9, -11]
+}
+
+output0 = {
+ output: # output 0
+ [29, -25, 39, -35]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 3}, 0.5f, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1, 1, 3}, 0.5f, -128", [-127, -126, -125, -124, -123, -122, -121, -120, -119])
+b1 = Parameter("op3", "TENSOR_INT32", "{3}, 0.25, 0", [0, 0, 0])
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 3}, 1.0, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-118, -118, -118]}
+
+output0 = {output: # output 0
+ [-113, -90, -68]}
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 3}, 0.5f, -128")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1, 1, 3}, 0.5f, -128")
+b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 3}, 1.0, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-118, -118, -118],
+ f1:
+ [-127, -126, -125,
+ -124, -123, -122,
+ -121, -120, -119],
+ b1:
+ [0, 0, 0]}
+
+output0 = {output: # output 0
+ [-113, -90, -68]}
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 3}, 0.5, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1, 1, 3}, 0.5, -128", [-127, -124, -121, -126, -123, -120, -125, -122, -119])
+b1 = Parameter("op3", "TENSOR_INT32", "{3}, 0.25, 0", [0, 0, 0])
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 3}, 1.0, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [ -127, -126, -125, -124, -123, -122, -121, -120, -119,
+ -118, -117, -116, -115, -114, -113, -112, -111, -110]}
+
+output0 = {output: # output 0
+ [ -120, -119, -117,
+ -111, -107, -104,
+ -102, -96, -90,
+ -93, -85, -77,
+ -84, -74, -63,
+ -75, -62, -50]
+ }
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 3}, 0.5, -128")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1, 1, 3}, 0.5, -128")
+b1 = Input("op3", "TENSOR_INT32", "{3}, 0.25, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 3}, 1.0, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [ -127, -126, -125, -124, -123, -122, -121, -120, -119,
+ -118, -117, -116, -115, -114, -113, -112, -111, -110],
+ f1:
+ [ -127, -124, -121,
+ -126, -123, -120,
+ -125, -122, -119],
+ b1:
+ [0, 0, 0]}
+
+output0 = {output: # output 0
+ [ -120, -119, -117,
+ -111, -107, -104,
+ -102, -96, -90,
+ -93, -85, -77,
+ -84, -74, -63,
+ -75, -62, -50]
+ }
+
+# Instantiate an example
+Example((input0, output0))
+
+# conv_quant8.mod.py, but with the filter and bias as constants
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 1}, 0.5f, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128",
+ [-126, -126, -126, -126])
+b1 = Parameter("op3", "TENSOR_INT32", "{1}, 0.25f, 0", [4])
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+# output dimension:
+# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
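+# Here: (3 - 2 + 1) x (3 - 2 + 1) = 2 x 2.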
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride,
+ stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [-120, -120, -120, -120, -124, -120, -120, -120, -120]
+}
+# (i1 (conv) f1) + b1
+output0 = {
+ output: # output 0
+ [-113, -113, -113, -113]
+}
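+# A worked check: inputs dequantize to (q + 128) * 0.5 and every filter tap
+# to 1.0, so each 2x2 window sums to 4 + 4 + 4 + 2 = 14; adding the bias
+# 4 * 0.25 = 1 gives 15, which requantizes to 15 - 128 = -113.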
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 3}, 0.5, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1, 1, 3}, 0.5, -128",
+ [-118, -88, -58, -108, -78, -48, -98, -68, -38])
+b1 = Parameter("op3", "TENSOR_INT32", "{3}, 0.25, 0", [0, 0, 0])
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 3}, 1.0, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [ -127, -126, -125, -124, -123, -122, -121, -120, -119,
+ -118, -117, -116, -115, -114, -113, -112, -111, -110]}
+
+output0 = {output: # output 0
+ [ -53, -38, -23,
+ 37, 75, 112,
+ 127, 127, 127,
+ 127, 127, 127,
+ 127, 127, 127,
+ 127, 127, 127]
+ }
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 1}, 0.5f, -128")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
+b1 = Input("op3", "TENSOR_INT32", "{1}, 0.25f, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+# output dimension:
+# (i1.height - f1.height + 1) x (i1.width - f1.width + 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, -128")
+
+model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-120, -120, -120, -120, -124, -120, -120, -120, -120],
+ f1:
+ [-126, -126, -126, -126],
+ b1:
+ [4]}
+# (i1 (conv) f1) + b1
+output0 = {output: # output 0
+ [-113, -113, -113, -113]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_3/depth_to_space_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/depth_to_space_quant8_signed.mod.py
new file mode 100644
index 000000000..5c046b8d4
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/depth_to_space_quant8_signed.mod.py
@@ -0,0 +1,127 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 8}, 0.5f, 0")
+block = Int32Scalar("radius", 2)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, 0")
+
+model = model.Operation("DEPTH_TO_SPACE", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [-127, -126, -125, -124, 124, 125, 126, 127]
+}
+
+output0 = {
+ output: # output 0
+ [-127, -126, -125, -124, 124, 125, 126, 127]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 4}, 0.5f, 0")
+block = Int32Scalar("radius", 2)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 4, 1}, 0.5f, 0")
+
+model = model.Operation("DEPTH_TO_SPACE", i1, block).To(output)
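+# Index sketch for block_size = 2 with four input channels and one output
+# channel: out[h, w] = in[h // 2, w // 2, (h % 2) * 2 + (w % 2)], which is
+# why the expected output below comes out in ascending order.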
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [
+ -128, -127, -124, -123, -126, -125, -122, -121, 120, 121, 124, 125,
+ 122, 123, 126, 127
+ ]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -128, -127, -126, -125, -124, -123, -122, -121, 120, 121, 122, 123,
+ 124, 125, 126, 127
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# DEPTH_TO_SPACE_NCHW_1, block_size = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 8}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+Model().Operation("DEPTH_TO_SPACE", i1, 2, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# DEPTH_TO_SPACE_NCHW_2, block_size = 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+Model().Operation("DEPTH_TO_SPACE", i2, 2, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.],
+ o2: [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# DEPTH_TO_SPACE_NCHW_3, block_size = 2
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 8}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+Model().Operation("DEPTH_TO_SPACE", i3, 2, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [10, 20, 11, 21, 14, 24, 15, 25,
+ 12, 22, 13, 23, 16, 26, 17, 27,
+ 18, 28, 19, 29, 112, 212, 113, 213,
+ 110, 210, 111, 211, 114, 214, 115, 215],
+ o3: [10, 20, 11, 21, 12, 22, 13, 23,
+ 14, 24, 15, 25, 16, 26, 17, 27,
+ 18, 28, 19, 29, 110, 210, 111, 211,
+ 112, 212, 113, 213, 114, 214, 115, 215]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/depthwise_conv2d_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/depthwise_conv2d_quant8_signed.mod.py
new file mode 100644
index 000000000..ed7cb0e5f
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/depthwise_conv2d_quant8_signed.mod.py
@@ -0,0 +1,526 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# dilation set to 1 (default)
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0., .2, 0., .25, 0., 0., .3, .25, 0., 0., 0., .25, .1, 0., 0.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 2, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ b1: ("TENSOR_INT32", 0.005, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ o1: [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# dilation set to 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [0,0,0,0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 2, 0, layout, 2, 2).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,],
+ o2: [13, 14, 0, 0,
+ 0, 0, 11, 12,
+ 5, 6, 0, 0,
+ 0, 0, 3, 4]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# same as test 1 but with implicit padding
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0., .2, 0., .25, 0., 0., .3, .25, 0., 0., 0., .25, .1, 0., 0.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 2, 1, 1, 2, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ b1: ("TENSOR_INT32", 0.005, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ o1: [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]
+}, name="valid_padding").AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# same as test 2 but with implicit padding
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [0,0,0,0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 2, 1, 1, 2, 0, layout, 2, 2).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128),
+ b2: ("TENSOR_INT32", 0.05, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,],
+ o2: [13, 14, 0, 0,
+ 0, 0, 11, 12,
+ 5, 6, 0, 0,
+ 0, 0, 3, 4]
+}, name="valid_padding").AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# dilation set to 3, padding SAME, stride 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 6, 6, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 1, 2, 2, 1, 0, layout, 3, 3).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 0, 0,
+ 0, 0, 1, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0],
+ o2: [4, 0, 3,
+ 0, 0, 0,
+ 2, 0, 1]
+}, name="same_padding_stride_2").AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# Same scales, zeroPoint = -128
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{1, 2, 2, 2}",
+ [2, 4, 2, 0, 2, 2, 2, 0],
+ extraParams = SymmPerChannelQuantParams(channelDim=3, scales=[0.5, 0.5]))
+b1 = Parameter("op3", "TENSOR_INT32", "{2}", [0, 0])
+o1 = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 2}, 1.f, -128")
+Model("same").Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 1, 0).To(o1)
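+# A worked check of output channel 1: the channel-1 inputs dequantize to
+# 8, 16, 32, 64 and the channel-1 filter taps to 2, 0, 1, 0, so the
+# accumulation is 8 * 2 + 32 * 1 = 48, which requantizes to 48 - 128 = -80.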
+
+# Instantiate an example
+Example({
+ i1: [-124, -112, -124, -96, -124, -64, -124, 0],
+ o1: [-120, -80],
+})
+
+#######################################################
+
+# Different scales, zeroPoint = 0
+i2 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 2}, 0.5f, 0")
+f2 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{1, 2, 2, 4}",
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ extraParams = SymmPerChannelQuantParams(channelDim=3, scales=[1.0, 0.5, 1.0, 0.5]))
+b2 = Parameter("op3", "TENSOR_INT32", "{4}", [4, 4, 4, 4])
+o2 = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 4}, 1.f, 0")
+Model("different").Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 2, 0).To(o2)
+
+# Instantiate an example
+Example({
+ i2: [1, 2] * 9,
+ o2: [4, 2, 6, 3, 4, 2, 6, 3,
+ 4, 2, 6, 3, 4, 2, 6, 3],
+})
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# With layout param
+i3 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 2}, 0.5f, 0")
+f3 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{1, 2, 2, 4}",
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ extraParams = SymmPerChannelQuantParams(channelDim=3, scales=[1.0, 0.5, 1.0, 0.5]))
+b3 = Parameter("op3", "TENSOR_INT32", "{4}", [4, 4, 4, 4])
+o3 = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 4}, 1.f, 0")
+Model("layout").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 2, 0, layout).To(o3)
+
+# Instantiate an example
+Example({
+ i3: [1, 2] * 9,
+ o3: [4, 2, 6, 3, 4, 2, 6, 3,
+ 4, 2, 6, 3, 4, 2, 6, 3],
+}).AddNchw(i3, o3, layout)
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 2, 2}, 0.5f, -1")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 4}, 0.5f, -1", [1, 3, 5, 7, -19, 19, -23, 23, 9, 11, 13, 15, 25, -29, 29, -33])
+b1 = Parameter("op3", "TENSOR_INT32", "{4}, 0.25f, 0", [4, 8, 12, 16])
+pad_valid = Int32Scalar("pad_valid", 2)
+act_none = Int32Scalar("act_none", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 2)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 1, 4}, 1.f, -1")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad_valid,
+ stride, stride,
+ cm, act_none).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 3, 13, 15,
+ 5, 7, 17, 19,
+ 9, 11, 21, 23]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [70, -35, 98, -21,
+ 90, -27, 126, -5]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128",
+ [-126, -124, -126, -128, -126, -126, -126, -128])
+b1 = Parameter("op3", "TENSOR_INT32", "{2}, 0.25f, 0", [0, 0])
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 2}, 1.f, -128")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-124, -112, -124, -96, -124, -64, -124, 0]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [-120, -80]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 2}, 1.f, -128")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-124, -112, -124, -96, -124, -64, -124, 0],
+ f1:
+ [-126, -124, -126, -128, -126, -126, -126, -128],
+ b1:
+ [0, 0]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [-120, -80]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+f1 = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128",
+ [-126, -124, -126, -128, -126, -126, -126, -128])
+b1 = Parameter("op3", "TENSOR_INT32", "{2}, 0.25f, 0", [0, 0])
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1,1,1,2}, 1.f, -128")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-124, -112, -124, -96, -124, -64, -124, 0]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [-120, -80]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+f1 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+stride = Int32Scalar("stride", 1)
+cm = Int32Scalar("channelMultiplier", 1)
+output = Output("op4", "TENSOR_QUANT8_ASYMM_SIGNED", "{1,1,1,2}, 1.f, -128")
+
+model = model.Operation("DEPTHWISE_CONV_2D",
+ i1, f1, b1,
+ pad0, pad0, pad0, pad0,
+ stride, stride,
+ cm, act).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-124, -112, -124, -96, -124, -64, -124, 0],
+ f1:
+ [-126, -124, -126, -128, -126, -126, -126, -128],
+ b1:
+ [0, 0]}
+# (i1 (depthconv) f1)
+output0 = {output: # output 0
+ [-120, -80]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# DEPTHWISE_CONV2D_NCHW, pad = 0, stride = 1, cm = 2, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0., .2, 0., .25, 0., 0., .3, .25, 0., 0., 0., .25, .1, 0., 0.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 2, 0, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ b1: ("TENSOR_INT32", 0.005, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.01, 0.005, 0.01, 0.005])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.005, 0.0025, 0.005, 0.0025], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ f1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.01, 0.005, 0.01, 0.005])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.005, 0.0025, 0.005, 0.0025], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0001, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ o1: [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# DEPTHWISE_CONV2D_NCHW_2, pad = valid, stride = 1, cm = 2, act = none
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 2, 2}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 1, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 2, 1, 1, 2, 0, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b2: ("TENSOR_INT32", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -28)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.5, 0.25, 0.5, 0.25])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.125, 0.25, 0.125], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -28)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12],
+ o2: [71, -34, 99, -20, 91, -26, 127, -4]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# DEPTHWISE_CONV2D_NCHW_LARGE, pad = 0, stride = 1, cm = 1, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+f3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 2}", [.25, 0, .25, 1, .25, 0, .25, 1])
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [100, 200])
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+Model("large").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ f3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, 0),
+ b3: ("TENSOR_INT32", 0.0625, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, 0)
+})
+channelquant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f3: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.125, 0.25])),
+ b3: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [10, 21, 10, 22, 10, 23, 10, 24],
+ o3: [110, 246]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# DEPTHWISE_CONV2D_NCHW_LARGE with 4 output channels, pad = 0, stride = 1, cm = 1, act = none
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+f4 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0, 10, 50, .25, 1, 20, 50, .25, 0, 30, 50, .25, 1, 40, 50])
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [6000, 7000, 8000, 9000])
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 4}")
+Model("large").Operation("DEPTHWISE_CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ b4: ("TENSOR_INT32", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 50.0, -128)
+})
+channelQuant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ f4: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[1.0, 2.0, 1.0, 1.0])),
+ b4: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 0.5, 0.5], hide=True)),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 50.0, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [10, 21, 10, 0,
+ 10, 22, 20, 0,
+ 10, 23, 30, 0,
+ 10, 24, 40, 0],
+ o4: [6010, 7046, 11000, 9000]
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+# Quantized test where the product of input and filter scales exceeds the output scale (output multiplier > 1).
+input_scale = 256.5 / 255
+input_zero_point = -1
+filter_scale = 256.5 / 255
+filter_zero_point = 0
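+
+# The effective rescale factor applied to the int32 accumulator is
+# input_scale * filter_scale / output_scale = (256.5 / 255)**2 / 1.0,
+# slightly above 1, which is the case this test exercises. Illustrative
+# check (plain Python):
+assert input_scale * filter_scale / 1.0 > 1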
+i9 = Input("op1",
+ ("TENSOR_QUANT8_ASYMM_SIGNED", [1, 3, 2, 2], input_scale, input_zero_point))
+f9 = Parameter(
+ "op2",
+ ("TENSOR_QUANT8_ASYMM_SIGNED", [1, 2, 2, 4], filter_scale, filter_zero_point), [
+ 1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14,
+ 15, -16
+ ])
+b9 = Parameter("op3", ("TENSOR_INT32", [4], input_scale * filter_scale, 0),
+ [2, 4, 6, 8])
+o9 = Output("op4", ("TENSOR_QUANT8_ASYMM_SIGNED", [1, 2, 1, 4], 1.0, -1))
+model9 = Model("quant_output_multiplier_gt_1").Operation("DEPTHWISE_CONV_2D", i9, f9, b9, 2, 1, 1, 2,
+ 0).To(o9)
+
+# Instantiate an example
+example = Example({
+ i9: [1, 3, 13, 15, 5, 7, 17, 19, 9, 11, 21, 23],
+ o9: [127, -70, 127, -41, 127, -54, 127, -9]
+}, model=model9)
diff --git a/nn/runtime/test/specs/V1_3/dequantize_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/dequantize_quant8_signed.mod.py
new file mode 100644
index 000000000..b8347b535
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/dequantize_quant8_signed.mod.py
@@ -0,0 +1,106 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, -128")
+i2 = Output("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+model = model.Operation("DEQUANTIZE", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -96, 0, 127]}
+
+output0 = {i2: # output 0
+ [0.0, 32.0, 128.0, 255.0]}
+
+# Instantiate an example
+Example((input0, output0))
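+
+# The expected outputs follow the affine dequantization rule
+# real = scale * (q - zeroPoint), here 1.0 * (q - (-128)). Illustrative
+# sketch (plain Python; `dequant` is not part of the test DSL):
+dequant = lambda q, scale, zp: scale * (q - zp)
+assert [dequant(q, 1.0, -128) for q in [-128, -96, 0, 127]] == [0.0, 32.0, 128.0, 255.0]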
+
+#######################################################
+
+def test(name, input0, output0, input0_data, output0_data):
+ model = Model().Operation("DEQUANTIZE", input0).To(output0)
+ example = Example({
+ input0: input0_data,
+ output0: output0_data,
+ },
+ model=model,
+ name=name).AddVariations("relaxed", "float16")
+
+
+test(
+ name="1d_quant8_asymm",
+ input0=Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{10}, 0.5, -1"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{10}"),
+ input0_data=[-128, -127, -126, -125, -124, 123, 124, 125, 126, 127],
+ output0_data=[-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64],
+)
+
+test(
+ name="2d_quant8_asymm",
+ input0=Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 5}, 0.5, -1"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{2, 5}"),
+ input0_data=[-128, -127, -126, -125, -124, 123, 124, 125, 126, 127],
+ output0_data=[-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64],
+)
+
+# Same model with TENSOR_FLOAT16 output
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, -128")
+i2 = Output("op2", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+model = model.Operation("DEQUANTIZE", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -96, 0, 127]}
+
+output0 = {i2: # output 0
+ [0.0, 32.0, 128.0, 255.0]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+# Zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2}, 0.1f, 0", [9, 1]) # scores
+p2 = Parameter("roi", "TENSOR_QUANT16_ASYMM", "{1, 8}, 0.125f, 0", [8, 8, 80, 80, 0, 0, 80, 80]) # roi
+o1 = Output("scoresOut", "TENSOR_QUANT8_ASYMM_SIGNED", "{0}, 0.1f, 0") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_QUANT16_ASYMM", "{0, 4}, 0.125f, 0") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 1}, 0.1f, 0")
+zero_sized = Internal("featureMap", "TENSOR_QUANT8_ASYMM_SIGNED", "{0, 2, 2, 1}, 0.1f, 0")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# DEQUANTIZE op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("DEQUANTIZE", zero_sized).To(o3)
+
+float16 = DataTypeConverter().Identify({o3: ("TENSOR_FLOAT16",)})
+
+Example({
+ i1: [-127],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations("relaxed", float16)
diff --git a/nn/runtime/test/specs/V1_3/embedding_lookup_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/embedding_lookup_quant8_signed.mod.py
new file mode 100644
index 000000000..2f5202056
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/embedding_lookup_quant8_signed.mod.py
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+lookups = 3
+rows = 3
+columns = 2
+features = 4
+
+actual_values = [0] * (rows * columns * features)
+for i in range(rows):
+ for j in range(columns):
+ for k in range(features):
+ actual_values[(i * columns + j) * features + k] = i + j / 10. + k / 100.
+
+model = Model()
+index = Input("index", "TENSOR_INT32", "{%d}" % lookups)
+value = Input("value", "TENSOR_FLOAT32",
+ "{%d, %d, %d}" % (rows, columns, features))
+output = Output("output", "TENSOR_FLOAT32",
+ "{%d, %d, %d}" % (lookups, columns, features))
+model = model.Operation("EMBEDDING_LOOKUP", index, value).To(output)
+
+input0 = {index: [1, 0, 2], value: actual_values}
+
+output0 = {
+ output: [
+ 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, # Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, # Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, # Row 2
+ ]
+}
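+
+# EMBEDDING_LOOKUP gathers whole rows of `value` along dimension 0:
+# output[i] = value[index[i]], so index [1, 0, 2] emits rows 1, 0 and 2,
+# as the "Row N" comments above indicate. Illustrative sketch (plain
+# Python; `row` is a hypothetical helper, not part of the test DSL):
+row = lambda i: actual_values[i * columns * features:(i + 1) * columns * features]
+assert [round(v, 2) for v in row(1) + row(0) + row(2)] == [round(v, 2) for v in output0[output]]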
+
+quant8_signed = DataTypeConverter().Identify({
+ value: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1),
+ output: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1)
+})
+
+# Not including default because it is tested in 1.0 spec
+Example((input0, output0)).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/equal_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/equal_quant8_signed.mod.py
new file mode 100644
index 000000000..f4f8a6145
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/equal_quant8_signed.mod.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 2.0, 0)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[1], # effectively 2
+ output_data=[False, True, False],
+)
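+
+# The comparison is defined on dequantized values, real = scale * (q - zp):
+# input0 dequantizes to [1.0, 2.0, 3.0] and input1 to [2.0], hence
+# [False, True, False]. Illustrative sketch (plain Python; `dequant` is
+# not part of the test DSL):
+dequant = lambda q, scale, zp: scale * (q - zp)
+assert [dequant(q, 1.0, 0) == dequant(1, 2.0, 0) for q in [1, 2, 3]] == [False, True, False]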
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.0, 1)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[3], # effectively 2
+ output_data=[False, True, False],
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[-128],
+ input1_data=[72],
+ output_data=[False],
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[72],
+ input1_data=[-128],
+ output_data=[False],
+)
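+
+# The two "overflow_if_requantized" cases pin down that the comparison
+# must not be done by requantizing one operand into the other's
+# parameters, which would leave the int8 range. In real values:
+# 1.64771 * (-128 + 97) = -51.08 versus 1.49725 * (72 - 112) = -59.89,
+# so EQUAL is False either way.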
diff --git a/nn/runtime/test/specs/V1_3/expand_dims_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/expand_dims_quant8_signed.mod.py
new file mode 100644
index 000000000..eea269cf6
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/expand_dims_quant8_signed.mod.py
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+
+output0 = Output("output", "TENSOR_FLOAT32", "{1, 2, 2}")
+output1 = Output("output", "TENSOR_FLOAT32", "{2, 1, 2}")
+output2 = Output("output", "TENSOR_FLOAT32", "{2, 2, 1}")
+output3 = output2  # axis = -1 resolves to axis = 2 here, so the output shape matches
+
+model0 = Model().Operation("EXPAND_DIMS", input0, 0).To(output0)
+model1 = Model().Operation("EXPAND_DIMS", input0, 1).To(output1)
+model2 = Model().Operation("EXPAND_DIMS", input0, 2).To(output2)
+model3 = Model().Operation("EXPAND_DIMS", input0, -1).To(output3)
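+
+# EXPAND_DIMS only inserts a size-1 dimension, so each example below
+# feeds the same flat buffer as both input and expected output; the four
+# models differ only in where the new dimension lands in the output shape.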
+
+data = [1.2, -3.4, 5.6, 7.8]
+
+for model, output in [(model0, output0),
+ (model1, output1),
+ (model2, output2),
+ (model3, output3)]:
+ quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ })
+
+ Example({
+ input0: data,
+ output: data,
+ }, model=model).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/fully_connected_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/fully_connected_quant8_signed.mod.py
new file mode 100644
index 000000000..a1e3bf201
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/fully_connected_quant8_signed.mod.py
@@ -0,0 +1,187 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 1, 5, 1}, 0.5f, -1")
+weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 10}, 0.5f, -1",
+ [1, 3, 5, 7, 9, 11, 13, 15, 17, 19,
+ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19,
+ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
+bias = Parameter("b0", "TENSOR_INT32", "{3}, 0.25f, 0", [4, 8, 12])
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 1.f, -1")
+act_relu = Int32Scalar("act_relu", 1)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [1, 3, 5, 7, 9, 11, 13, 15, -19, -21,
+ 1, 3, 5, 7, 9, 11, 13, -17, 17, -21]}
+output0 = {out0: # output 0
+ [23, 24, 25, 57, 58, 59]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128") # batch = 1, input_size = 5
+weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128", [-118, -108, -108, -108, -118]) # num_units = 1, input_size = 5
+bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.04, 0", [10])
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 1.f, -128") # batch = 1, number_units = 1
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [-118, -118, -118, -118, -118]}
+output0 = {out0: # output 0
+ [-96]}
+
+# Instantiate an example
+Example((input0, output0))
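+
+# Worked through in real values: inputs and weights dequantize via
+# 0.2 * (q + 128), giving inputs of 2.0 and weights [2, 4, 4, 4, 2]; the
+# dot product is 32, plus bias 10 * 0.04 = 0.4, so 32.4, which
+# requantizes to round(32.4 / 1.0) - 128 = -96, the expected output.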
+
+#######################################################
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128") # batch = 1, input_size = 5
+weights = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128") # num_units = 1, input_size = 5
+bias = Input("b0", "TENSOR_INT32", "{1}, 0.04, 0")
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 1.f, -128") # batch = 1, number_units = 1
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [-118, -118, -118, -118, -118],
+ weights:
+ [-118, -108, -108, -108, -118],
+ bias:
+ [10]}
+output0 = {out0: # output 0
+ [-96]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 0.5f, -128")
+weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128", [-126])
+bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.25f, 0", [4])
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 1.f, -128")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [-126, -96, -112]}
+output0 = {out0: # output 0
+ [-126, -111, -119]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 0.5f, -128")
+weights = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 0.5f, -128")
+bias = Input("b0", "TENSOR_INT32", "{1}, 0.25f, 0")
+out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 1.f, -128")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [-126, -96, -112],
+ weights: [-126],
+ bias: [4]}
+output0 = {out0: # output 0
+ [-126, -111, -119]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [4])
+out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+quant8_signed_mult_gt_1 = DataTypeConverter(name="quant8_mult_gt_1").Identify({
+ in0: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1),
+ weights: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -8),
+ bias: ("TENSOR_INT32", 0.25, 0),
+ out0: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+})
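+
+# Here the effective output multiplier, input_scale * filter_scale /
+# output_scale = 0.5 * 0.5 / 0.1 = 2.5, exceeds 1, which is the corner
+# case this variation is named for. Illustrative check:
+assert 0.5 * 0.5 / 0.1 > 1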
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [2, 32, 16]}
+output0 = {out0: # output 0
+ [8, 68, 36]}
+
+# Instantiate an example
+Example((input0, output0)).AddVariations(quant8_signed_mult_gt_1, includeDefault=False)
+
+#######################################################
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 3}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# FULLY_CONNECTED op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{1, 3}", [1, 2, 3]) # weights
+b = Parameter("bias", "TENSOR_FLOAT32", "{1}", [1]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1}") # out
+model = model.Operation("FULLY_CONNECTED", zero_sized, w, b, 0).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1, 2, 3],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/gather_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/gather_quant8_signed.mod.py
new file mode 100644
index 000000000..8e34c258d
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/gather_quant8_signed.mod.py
@@ -0,0 +1,140 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 3, 2}")
+axis = 1
+indices = Input("indices", "TENSOR_INT32", "{3, 2}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{1, 3, 2, 2}")
+
+model = Model().Operation("GATHER", input0, axis, indices).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+})
+
+Example({
+ input0: [1, 2,
+ 3, 4,
+ 5, 6],
+ indices: [2, 0,
+ 1, 0,
+ 0, 1],
+ output0: [5, 6,
+ 1, 2,
+ 3, 4,
+ 1, 2,
+ 1, 2,
+ 3, 4],
+}, model=model).AddVariations(quant8_signed, includeDefault=False)
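+
+# GATHER reorders slices of the input along `axis` as given by `indices`;
+# with axis = 1 here, output[0, i, j, :] = input0[0, indices[i, j], :].
+# A minimal plain-Python sketch of the same lookup (hypothetical names):
+rows3 = [[1, 2], [3, 4], [5, 6]]
+assert [rows3[i] for i in [2, 0, 1, 0, 0, 1]] == [[5, 6], [1, 2], [3, 4], [1, 2], [1, 2], [3, 4]]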
+
+#######################################################
+
+def test(input0, axis, indices, output0, input_data, output_data):
+ model = Model().Operation("GATHER", input0, axis, indices).To(output0)
+
+ quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ })
+
+ Example({
+ input0: input_data,
+ output0: output_data,
+ }, model=model).AddVariations(quant8_signed, includeDefault=False)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 2}"),
+ axis=0,
+ indices=[1, 0],
+ output0=Output("output0", "TENSOR_FLOAT32", "{2, 2}"),
+ input_data=[-2.0, 0.2,
+ 0.7, 0.8],
+ output_data=[0.7, 0.8,
+ -2.0, 0.2],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 2}"),
+ axis=0,
+ indices=[1], # Unlike TensorFlow, 0-D arguments and outputs are not supported.
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 2}"),
+ input_data=[-2.0, 0.2,
+ 0.7, 0.8],
+ output_data=[0.7, 0.8],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ axis=0,
+ indices=[1],
+ output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+ input_data=[1, 2, 3],
+ output_data=[2],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ axis=0,
+ indices=[1, 0],
+ output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+ input_data=[1, 2, 3],
+ output_data=[2, 1],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 2, 2}"),
+ axis=0,
+ indices=[0, 0],
+ output0=Output("output0", "TENSOR_FLOAT32", "{2, 2, 2}"),
+ input_data=[-2.0, 0.2,
+ 0.7, 0.8],
+ output_data=[-2.0, 0.2,
+ 0.7, 0.8,
+ -2.0, 0.2,
+ 0.7, 0.8],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 1}"),
+ axis=0,
+ indices=[1, 3],
+ output0=Output("output0", "TENSOR_FLOAT32", "{2, 1}"),
+ input_data=[-2.0, 0.2, 0.7, 0.8],
+ output_data=[0.2, 0.8],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 2, 3}"),
+ axis=1,
+ indices=[1, 0],
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 2, 3}"),
+ input_data=[1, 2, 3,
+ 4, 5, 6],
+ output_data=[4, 5, 6,
+ 1, 2, 3],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 2, 3}"),
+ axis=-1,
+ indices=[2, 0],
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 2, 2}"),
+ input_data=[1, 2, 3,
+ 4, 5, 6],
+ output_data=[3, 1,
+ 6, 4],
+)
diff --git a/nn/runtime/test/specs/V1_3/generate_proposals_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/generate_proposals_quant8_signed.mod.py
new file mode 100644
index 000000000..dee479d36
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/generate_proposals_quant8_signed.mod.py
@@ -0,0 +1,211 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+model = Model()
+i1 = Input("scores", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # scores
+i2 = Input("bboxDeltas", "TENSOR_FLOAT32", "{1, 2, 2, 8}") # bounding box deltas
+i3 = Input("anchors", "TENSOR_FLOAT32", "{2, 4}") # anchors
+i4 = Input("imageInfo", "TENSOR_FLOAT32", "{1, 2}") # image info
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{4}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{4, 4}") # roi out
+o3 = Output("batchSplit", "TENSOR_INT32", "{4}") # batch split out
+model = model.Operation("GENERATE_PROPOSALS",
+ i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -28),
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.05, 0),
+ i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
+ i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -28),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
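+
+# The coordinate-carrying operands (anchors, imageInfo, roi out) use the
+# 16-bit quantized types with scale 0.125, which the NNAPI spec
+# prescribes for box coordinates since they need more range than 8 bits
+# provide.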
+
+input0 = {
+ i1: [ # scores
+ 0.8, 0.9, 0.85, 0.85,
+ 0.75, 0.8, 0.9, 0.95
+ ],
+ i2: [ # bounding box deltas
+ 0.5, 0.1, 0.1, 0.1, 0.5, 0.1, 0.5, 0.1,
+ -0.25, 0.1, -0.1, -0.1, -0.25, 0.1, 0.2, 0.1,
+ 0.4, -0.1, -0.2, 0.2, 0.4, -0.1, -0.2, 0.2,
+ -0.2, -0.2, 0.2, 0.2, -0.2, -0.2, 0.2, 0.2
+ ],
+ i3: [0, 1, 4, 3, 1, 0, 3, 4], # anchors
+ i4: [32, 32], # image info
+}
+
+output0 = {
+ o1: [0.95, 0.9, 0.85, 0.8], # scores out
+ o2: [ # roi out
+ 4.3785973, 2.7571943 , 6.8214025, 7.642805,
+ 1.3512788, 0.18965816, 4.648721 , 4.610342,
+ 3.1903253, 1.2951627 , 6.8096747, 3.1048374,
+ 1.9812691, 3.1571944 , 3.6187308, 8.042806
+ ],
+ o3: [0, 0, 0, 0]
+}
+
+Example((input0, output0)).AddNchw(i1, i2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+model = Model()
+i1 = Input("scores", "TENSOR_FLOAT32", "{2, 4, 4, 4}") # scores
+i2 = Input("bboxDeltas", "TENSOR_FLOAT32", "{2, 4, 4, 16}") # bounding box deltas
+i3 = Input("anchors", "TENSOR_FLOAT32", "{4, 4}") # anchors
+i4 = Input("imageInfo", "TENSOR_FLOAT32", "{2, 2}") # image info
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{30}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{30, 4}") # roi out
+o3 = Output("batchSplit", "TENSOR_INT32", "{30}") # batch split out
+model = model.Operation("GENERATE_PROPOSALS",
+ i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.005, -128),
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
+ i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.005, -128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.885, 0.21 , 0.78 , 0.57 ,
+ 0.795, 0.66 , 0.915, 0.615,
+ 0.27 , 0.69 , 0.645, 0.945,
+ 0.465, 0.345, 0.855, 0.555,
+ 0.48 , 0.6 , 0.735, 0.63 ,
+ 0.495, 0.03 , 0.12 , 0.225,
+ 0.24 , 0.285, 0.51 , 0.315,
+ 0.435, 0.255, 0.585, 0.06 ,
+ 0.9 , 0.75 , 0.18 , 0.45 ,
+ 0.36 , 0.09 , 0.405, 0.15 ,
+ 0. , 0.195, 0.075, 0.81 ,
+ 0.87 , 0.93 , 0.39 , 0.165,
+ 0.825, 0.525, 0.765, 0.105,
+ 0.54 , 0.705, 0.675, 0.3 ,
+ 0.42 , 0.045, 0.33 , 0.015,
+ 0.84 , 0.135, 0.72 , 0.375,
+ 0.495, 0.315, 0.195, 0.24 ,
+ 0.21 , 0.54 , 0.78 , 0.72 ,
+ 0.045, 0.93 , 0.27 , 0.735,
+ 0.135, 0.09 , 0.81 , 0.705,
+ 0.39 , 0.885, 0.42 , 0.945,
+ 0.9 , 0.225, 0.75 , 0.3 ,
+ 0.375, 0.63 , 0.825, 0.675,
+ 0.015, 0.48 , 0.645, 0.615,
+ 0.33 , 0.465, 0.66 , 0.6 ,
+ 0.075, 0.84 , 0.285, 0.57 ,
+ 0.585, 0.165, 0.06 , 0.36 ,
+ 0.795, 0.855, 0.105, 0.45 ,
+ 0. , 0.87 , 0.525, 0.255,
+ 0.69 , 0.555, 0.15 , 0.345,
+ 0.03 , 0.915, 0.405, 0.435,
+ 0.765, 0.12 , 0.51 , 0.18
+ ],
+ i2: [ # bounding box deltas
+ -1.9, 0.4, 1.4, 0.5, -1.5, -0.2, 0.3, 1.2, 0. , -0.6, 0.4, -1.3, 0.8, 0.9, -0.2, 0.8,
+ -0.2, 0. , 0.4, 0.1, -0.2, -1.6, -0.6, -0.1, -1. , 0.6, 0.5, -0.2, -1.7, -1.4, 0.5, -0.1,
+ -1.5, 1.3, -0.7, -0.9, 0.9, 0.2, -0.2, 0. , -0.7, 0.3, -0.4, -0.3, -0.5, -0.3, 1. , -0.7,
+ 1.2, -0.3, 0. , 0.3, -0.7, 1. , -0.2, -0.6, -1.3, 0. , 0.3, 0.1, 0.4, 0.2, 2.4, 0. ,
+ 0.1, 0. , 0.7, -0.9, 0.1, -0.4, 0.3, -0.3, -0.7, 0.1, 0.7, 0. , -0.3, 1.6, 0. , 1.1,
+ 0.4, -0.7, -0.9, 0. , 0. , 0.4, -0.6, 0.4, -1.9, -1.2, 0. , -0.3, 0.2, 0. , 0.1, 0.8,
+ 0. , 0.9, -1.7, 0.3, 0.7, -0.7, 0.7, 1.2, -0.4, -0.1, -0.6, 0.6, -0.4, -0.2, 0.3, -0.5,
+ 0. , 1. , -0.1, -0.3, -0.8, 0.1, -1.2, -2.4, 0.1, 1.4, 0.4, 0.1, -1.1, 0.4, -0.4, -0.2,
+ 0.1, 0. , 0.7, 0.1, -1.3, 0.1, -0.4, -0.2, 0.2, 0.1, -0.8, 0. , -1.4, 2. , -0.6, -0.5,
+ 0. , 1. , -1.4, -1.1, 0.6, -0.7, 0.4, 1.1, -1.1, 1.6, -0.3, 0. , -0.7, 0.3, -1.3, 0. ,
+ 0. , 0. , -0.3, 0. , -1.1, -1.5, 0.9, -1.4, -0.7, 0.1, -1.4, 0.9, 0.1, 0.2, -0.1, -1.7,
+ 0.2, -0.3, -0.9, 1.1, 0.1, 1. , 1. , -0.9, 0.7, 0. , -0.3, 0.2, -0.8, -0.5, 0.6, -1.2,
+ 1. , 0.6, 0. , -1.6, 0.1, -1.2, 0.7, 0.8, 0.5, -0.2, -0.8, -1.3, -0.3, 0. , 0. , 0.3,
+ -0.6, -0.3, 1.3, 0.1, 2.2, 1.2, -1.1, 0.1, 1.2, 1.2, 1.3, -0.9, 0.1, -0.5, 0.1, -0.7,
+ -1.3, 1.3, 0.1, 2. , 0. , 0.2, 0.6, 0. , -0.1, -0.4, -0.5, 0.1, -0.6, -0.3, 0.2, -0.4,
+ -0.4, -0.7, -1.8, 0.4, -0.7, 0.4, 1.4, -0.3, 0.8, 0. , 0.4, -0.1, -1. , 0.2, 0.5, -0.6,
+ -1.1, 0.2, 1.6, -0.2, -0.4, -0.9, 0. , 0.3, 0. , 0.3, -0.3, 0.3, 0.3, 1.9, 0.3, -0.5,
+ -0.8, -1.3, -0.8, 0.2, 0.2, -0.4, -0.3, 0.6, 0.2, -0.2, 1.2, 0. , 0. , -0.3, 0.3, -1.5,
+ -1. , -0.3, -0.7, -0.3, -0.4, -1. , -0.6, -0.7, -0.2, 0.6, -0.3, 0.5, -0.2, 0.3, -0.5, -1.7,
+ 0. , -0.7, -0.1, -1.5, -0.9, 0.6, 0.3, -0.1, 0.2, 0.5, 0.6, -0.8, -0.3, 0.6, 0.9, -0.3,
+ 0.1, -1.7, -1.5, 0. , -0.1, -0.3, 0.7, -0.3, -0.4, 0. , -0.4, -0.3, 0.1, 1.1, 1.8, -0.9,
+ 0.6, 0.5, 0.2, -0.7, 0.2, 0.1, 1.2, 2.2, 0.3, 0.6, 0.4, 0.1, 0.2, 0. , -1.1, -0.2,
+ -0.7, 0. , -1.2, 0.6, -0.6, -0.2, -0.4, 0. , 0.7, -1.2, 0.8, 0. , -0.3, 0.2, 0.6, -1. ,
+ -0.1, -0.1, 0. , -0.4, -0.2, 0.4, -1.4, 0.3, 0.1, 1.3, -0.2, -0.7, 0.6, 0.7, 0.6, 0.1,
+ -0.4, 0.1, -0.2, -0.8, 0. , -1.3, 1.2, 1.4, 1.1, 0.5, 0.3, 0. , 0.1, -0.4, 0.5, -0.1,
+ -0.5, 0.3, -0.7, 0.9, -0.1, -0.4, 0.2, -0.8, 1. , 1. , 0.1, 0.1, -0.2, 0. , -0.4, -0.3,
+ -0.8, 0.7, -0.9, -0.3, -0.3, -2.8, 1. , 1.4, 0. , -2.6, 1.1, -1.1, 0.5, 0.1, -0.4, -1.5,
+ 0. , 0.3, -0.3, -0.2, 0.7, -0.8, -0.1, 0.5, 0.7, 1.4, -1.2, -1. , -0.6, 0.2, 1.1, -0.9,
+ 0.7, -0.4, 0. , 0. , -0.2, -0.2, 0.1, 0. , 0. , -0.7, -0.7, -1.4, -0.9, -0.5, -0.6, 0.4,
+ 0.3, 0. , 0.9, -0.2, 0.7, 1.2, 0.5, 0.8, -0.5, 1. , 0.2, -0.5, 1.3, -0.5, 0.3, 1.2,
+ -0.3, -0.1, 1.3, 0.2, 0.6, -1.4, -0.1, -0.2, -0.4, -0.9, 1.2, -0.9, -0.2, -1.2, -1. , -0.2,
+ -1.6, 2.1, -0.6, -0.2, -0.3, 0.5, 0.9, -0.4, 0. , -0.1, 0.1, -0.6, -1. , -0.7, 0.2, -0.2
+ ],
+ i3: [ # anchors
+ 0, 6, 16, 10,
+ 6, 0, 10, 16,
+ 3, 5, 13, 11,
+ 5, 3, 11, 13
+ ],
+ i4: [64, 64, 32, 32], # image info
+}
+
+output0 = {
+ o1: [ # scores out
+ 0.945, 0.93 , 0.915, 0.9 , 0.87 , 0.84 , 0.81, 0.795, 0.78, 0.765, 0.75, 0.735,
+ 0.72 , 0.705, 0.69 , 0.675, 0.945, 0.915, 0.9 , 0.885, 0.87, 0.84 , 0.81, 0.78,
+ 0.735, 0.72 , 0.63 , 0.6 , 0.585, 0.54
+ ],
+ o2: [ # roi out
+ 16.845154 , 2.5170734, 33.154846 , 7.4829264,
+ 32.96344 , 40.747444 , 43.836563 , 47.252556 ,
+ 0. , 9.143808 , 16.243607 , 14.056192 ,
+ 0. , 25.789658 , 25.710022 , 30.210342 ,
+ 37.947445 , 20.791668 , 44.452557 , 32.80833 ,
+ 30.277609 , 32.21635 , 32.92239 , 38.18365 ,
+ 25.885489 , 29.086582 , 31.314512 , 30.913418 ,
+ 2.8654022, 5.789658 , 26.734598 , 10.210342 ,
+ 0.5408764, 3.5824041, 15.459124 , 5.217595 ,
+ 10.753355 , 35.982403 , 15.246645 , 37.617596 ,
+ 1.4593601, 23.050154 , 4.1406403, 36.149845 ,
+ 0. , 15.6 , 11.068764 , 21.6 ,
+ 38.54088 , 35.28549 , 53.45912 , 40.71451 ,
+ 26.134256 , 48.358635 , 27.465742 , 64. ,
+ 29.96254 , 3.1999998, 33.23746 , 19.2 ,
+ 11.653517 , 43.980293 , 48.34648 , 46.41971 ,
+ 0. , 26.967152 , 26.748941 , 31.032848 ,
+ 28.590324 , 9.050154 , 32. , 22.149847 ,
+ 17.828777 , 19.00683 , 32. , 20.99317 ,
+ 3.5724945, 7.273454 , 11.627505 , 19.126545 ,
+ 4.989658 , 26.8 , 9.410341 , 32. ,
+ 15.157195 , 18.00537 , 20.042807 , 25.194632 ,
+ 30.889404 , 9.652013 , 32. , 12.347987 ,
+ 3.399414 , 3.8000002, 32. , 9.8 ,
+ 24.980408 , 10.086582 , 28.61959 , 11.913418 ,
+ 13.950423 , 3.884349 , 22.049576 , 6.115651 ,
+ 24.259361 , 6.8 , 26.94064 , 22.8 ,
+ 3.6538367, 19.475813 , 13.546164 , 28.524187 ,
+ 11.947443 , 29.318363 , 18.452557 , 32. ,
+ 17.318363 , 0. , 20.281635 , 16.17695
+ ],
+ o3: [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+}
+
+Example((input0, output0)).AddNchw(i1, i2, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/greater_equal_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/greater_equal_quant8_signed.mod.py
new file mode 100644
index 000000000..b0444f4df
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/greater_equal_quant8_signed.mod.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("GREATER_EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 2.0, 0)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[1], # effectively 2
+ output_data=[False, True, True],
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.0, 1)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[3], # effectively 2
+ output_data=[False, True, True],
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[-128],
+ input1_data=[72],
+ output_data=[True],
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[72],
+ input1_data=[-128],
+ output_data=[False],
+)
diff --git a/nn/runtime/test/specs/V1_3/greater_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/greater_quant8_signed.mod.py
new file mode 100644
index 000000000..9502c87c6
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/greater_quant8_signed.mod.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("GREATER", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 2.0, 0)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[1], # effectively 2
+ output_data=[False, False, True],
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.0, 1)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[3], # effectively 2
+ output_data=[False, False, True],
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[-128],
+ input1_data=[72],
+ output_data=[True],
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[72],
+ input1_data=[-128],
+ output_data=[False],
+)
diff --git a/nn/runtime/test/specs/V1_3/grouped_conv2d_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/grouped_conv2d_quant8_signed.mod.py
new file mode 100644
index 000000000..358613408
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/grouped_conv2d_quant8_signed.mod.py
@@ -0,0 +1,135 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: GROUPED_CONV2D, pad = 0, stride = 1, numGroups = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 1}", [1, 2, 2, 1, 4, 3, 2, 1]) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [10, -33.5]) # bias
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # output 0
+Model().Operation("GROUPED_CONV_2D", i1, w1, b1, 0, 0, 0, 0, 1, 1, 2, act, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
+})
+
+quant8_mult_gt_1_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.05, -48)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
+})
+
+channelQuant8_signed_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
+})
+
+example = Example({
+ i1: [1, 2, 3, 4, 5, 6,
+ 6, 5, 4, 3, 2, 1,
+ 2, 3, 3, 3, 3, 3],
+ o1: [33, -0.5,
+ 33, 7.5,
+ 31, 4.5,
+ 27, -9.5]
+}).AddNchw(i1, o1, layout).AddAllActivations(o1, act).AddVariations(quant8_signed, quant8_mult_gt_1_signed, channelQuant8_signed, channelQuant8_signed_mult_gt_1, includeDefault=False)
+
+
+# TEST 2: GROUPED_CONV2D_LARGE, pad = same, stride = 1, numGroups = 2, act = none
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 2, 2}") # input 0
+w2 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 3, 1}", [100, 20, 1, 200, 10, 2, 200, 30, 1, 100, 20, 3]) # weight
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [500, -1000]) # bias
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 2, 2}") # output 0
+Model("large").Operation("GROUPED_CONV_2D", i2, w2, b2, 1, 1, 1, 2, 0, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ w2: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -128),
+ b2: ("TENSOR_INT32", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 10.0, -28)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ w2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[2.0, 2.5])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.625], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 10.0, -28)
+})
+
+example = Example({
+ i2: [1, 2, 3, 4,
+ 4, 3, 2, 1,
+ 2, 3, 3, 3],
+ o2: [567, -873,
+ 1480, -160,
+ 608, -840,
+ 1370, -10,
+ 543, -907,
+ 760, -310]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
+
+
+# TEST 3: GROUPED_CONV2D_CHANNEL, pad = same, stride = 1, numGroups = 3, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 9}") # input 0
+w3 = Parameter("op2", "TENSOR_FLOAT32", "{6, 1, 1, 3}", [1, 2, 3, 2, 1, 0, 2, 3, 3, 6, 6, 6, 9, 8, 5, 2, 1, 1]) # weight
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{6}", [10, -20, 30, -40, 50, -60]) # bias
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 6}") # output 0
+Model("channel").Operation("GROUPED_CONV_2D", i3, w3, b3, 1, 1, 1, 3, 0, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ w3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ b3: ("TENSOR_INT32", 0.125, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -68)
+})
+
+channelQuant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ w3: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.3] * 3)),
+ b3: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125, 0.15] * 3, hide=True)),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -68)
+})
+
+example = Example({
+ i3: [1, 2, 3, 4, 55, 4, 3, 2, 1,
+ 5, 4, 3, 2, 11, 2, 3, 4, 5,
+ 2, 3, 2, 3, 22, 3, 2, 3, 2,
+ 1, 0, 2, 1, 33, 1, 2, 0, 1],
+ o3: [24, -16, 215, 338, 98, -51,
+ 32, -6, 73, 50, 134, -45,
+ 24, -13, 111, 128, 102, -51,
+ 17, -18, 134, 170, 73, -55]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/heatmap_max_keypoint_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/heatmap_max_keypoint_quant8_signed.mod.py
new file mode 100644
index 000000000..a860cea33
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/heatmap_max_keypoint_quant8_signed.mod.py
@@ -0,0 +1,130 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+heatmap2 = Input("heatmap", "TENSOR_FLOAT32", "{2, 4, 4, 4}")
+boxes2 = Input("boxes", "TENSOR_FLOAT32", "{2, 4}")
+score2 = Output("score", "TENSOR_FLOAT32", "{2, 4}")
+keypoint2 = Output("keypoint", "TENSOR_FLOAT32", "{2, 4, 2}")
+Model().Operation("HEATMAP_MAX_KEYPOINT", heatmap2, boxes2, layout).To(score2, keypoint2)
+
+quant8_signed = DataTypeConverter().Identify({
+ heatmap2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
+ boxes2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ score2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ keypoint2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+Example({
+ heatmap2: [
+ 0.19, 0.61, 0.49, 0.01, 0.98, 0.65, 0.64, 0.70, 0.76, 0.55,
+ 0.83, 0.19, 0.46, 0.03, 0.67, 0.71, 0.17, 0.23, 0.89, 0.08,
+ 0.96, 0.65, 0.52, 0.40, 0.36, 0.80, 0.55, 0.89, 0.58, 0.29,
+ 0.27, 0.69, 0.66, 0.06, 0.51, 0.26, 0.96, 0.38, 0.41, 0.89,
+ 0.88, 0.46, 0.96, 0.73, 0.54, 0.64, 0.84, 0.74, 0.51, 0.41,
+ 0.13, 0.19, 0.52, 0.21, 0.50, 0.75, 0.89, 0.89, 0.20, 0.58,
+ 0.70, 0.13, 0.29, 0.39,
+ 0.91, 0.06, 0.93, 0.34, 0.80, 0.87, 0.59, 0.67, 0.57, 0.85,
+ 0.24, 0.25, 0.76, 0.34, 0.37, 0.11, 0.00, 0.29, 0.30, 0.77,
+ 0.34, 0.57, 0.48, 0.76, 0.93, 0.18, 0.64, 0.12, 0.67, 0.47,
+ 0.56, 0.50, 0.48, 0.99, 0.46, 0.66, 0.98, 0.06, 0.10, 0.66,
+ 0.66, 0.91, 0.67, 0.23, 0.40, 0.37, 0.17, 0.35, 0.48, 0.98,
+ 0.47, 0.49, 0.56, 0.18, 0.75, 0.29, 0.04, 0.23, 0.42, 0.55,
+ 0.38, 0.07, 0.71, 0.80
+ ],
+ boxes2: [
+ 5, 2, 10, 20,
+ 1, 7, 30, 10
+ ],
+ score2: [
+ 1.020210, 0.890556, 1.007110, 0.945129,
+ 0.987798, 1.073820, 0.930000, 0.800000
+ ],
+ keypoint2: [
+ 7.227723, 4.250000,
+ 8.090278, 17.750000,
+ 8.523379, 12.589181,
+ 8.365580, 10.122508,
+ 12.431603, 8.934225,
+ 4.625000, 9.239437,
+ 4.625000, 7.375000,
+ 26.375000, 9.625000
+ ]
+}).AddNchw(heatmap2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+heatmap3 = Input("heatmap", "TENSOR_FLOAT32", "{5, 4, 4, 1}")
+boxes3 = Input("boxes", "TENSOR_FLOAT32", "{5, 4}")
+score3 = Output("score", "TENSOR_FLOAT32", "{5, 1}")
+keypoint3 = Output("keypoint", "TENSOR_FLOAT32", "{5, 1, 2}")
+Model().Operation("HEATMAP_MAX_KEYPOINT", heatmap3, boxes3, layout).To(score3, keypoint3)
+
+quant8_signed = DataTypeConverter().Identify({
+ heatmap3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ boxes3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ score3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -118),
+ keypoint3: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+Example({
+ heatmap3: [
+ -10, -1, 4, -5, # batch0
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10, -1, 4, -5, # batch1 - test mirror bottom
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ -10, -1, 4, -5, # batch2 - test mirror left
+ -8, -2, 4, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10, -1, 4, 10, # batch3 - test mirror top right
+ -8, -2, 4, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10,-56, 4, -5, # batch4 - test out of range delta
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5
+ ],
+ boxes3: [
+ 5, 2, 10, 20,
+ 1, 7, 30, 10,
+ 8, 3, 15, 13,
+ 6, 5, 19, 12,
+ 5, 2, 10, 20
+ ],
+ score3: [
+ 9.071493,
+ 10.00500,
+ 7.187500,
+ 10.00000,
+ 10.689667
+ ],
+ keypoint3: [
+ 8.224462, 8.537316,
+ 11.73000, 9.625000,
+ 8.875000, 9.562500,
+ 17.37500, 5.875000,
+ 9.569672, 2.000000
+ ]
+}).AddNchw(heatmap3, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/l2_normalization_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/l2_normalization_quant8_signed.mod.py
new file mode 100644
index 000000000..5c998fa5b
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/l2_normalization_quant8_signed.mod.py
@@ -0,0 +1,81 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # input 0
+o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -96),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0 / 128, 0)
+})
+
+example0 = {
+ i1: [ 0, 3, 4,
+ 3, 0, 4,
+ 8, 6, 0,
+ 12, 0, 9,
+ 9, 12, 20,
+ 12, 15, 16,
+ 20, 9, 12,
+ 16, 15, 12],
+ o1: [0.00, 0.60, 0.80,
+ 0.60, 0.00, 0.80,
+ 0.80, 0.60, 0.00,
+ 0.80, 0.00, 0.60,
+ 0.36, 0.48, 0.80,
+ 0.48, 0.60, 0.64,
+ 0.80, 0.36, 0.48,
+ 0.64, 0.60, 0.48]
+}
+
+# All dimensions, with all possible axis parameter
+Model().Operation("L2_NORMALIZATION", i1, axis).To(o1)
+Example(example0).AddAllDimsAndAxis(i1, o1, axis).AddVariations(quant8_signed, includeDefault=False)
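+
+# L2_NORMALIZATION divides each vector along `axis` by its Euclidean
+# norm; e.g. the first row [0, 3, 4] has norm 5 and maps to
+# [0, 0.6, 0.8]. The output scale 1/128 with zero point 0 spans the
+# [-1, 1] range of the result (up to quantization).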
+
+#######################################################
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # input 0
+o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -96),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0 / 128, 0)
+})
+
+example0 = {
+ i1: [ 0, 3, 4,
+ 3, 0, 4,
+ 8, 6, 0,
+ 12, 0, 9,
+ 9, 12, 20,
+ 12, 15, 16,
+ 20, 9, 12,
+ 16, 15, 12],
+ o1: [0.00, 0.60, 0.80,
+ 0.60, 0.00, 0.80,
+ 0.80, 0.60, 0.00,
+ 0.80, 0.00, 0.60,
+ 0.36, 0.48, 0.80,
+ 0.48, 0.60, 0.64,
+ 0.80, 0.36, 0.48,
+ 0.64, 0.60, 0.48]
+}
+
+# All dimensions other than 4, without axis parameter
+Model().Operation("L2_NORMALIZATION", i1).To(o1)
+Example(example0).AddAllDims(i1, o1).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/less_equal_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/less_equal_quant8_signed.mod.py
new file mode 100644
index 000000000..a81349e68
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/less_equal_quant8_signed.mod.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("LESS_EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 2.0, 0)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[1], # effectively 2
+ output_data=[True, True, False],
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.0, 1)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[3], # effectively 2
+ output_data=[True, True, False],
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[-128],
+ input1_data=[72],
+ output_data=[False],
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[72],
+ input1_data=[-128],
+ output_data=[True],
+)
diff --git a/nn/runtime/test/specs/V1_3/less_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/less_quant8_signed.mod.py
new file mode 100644
index 000000000..4d8b0fc6e
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/less_quant8_signed.mod.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("LESS", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 2.0, 0)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[1], # effectively 2
+ output_data=[True, False, False],
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.0, 1)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[3], # effectively 2
+ output_data=[True, False, False],
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[-128],
+ input1_data=[72],
+ output_data=[False],
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[72],
+ input1_data=[-128],
+ output_data=[True],
+)
diff --git a/nn/runtime/test/specs/V1_3/logistic_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/logistic_quant8_signed.mod.py
new file mode 100644
index 000000000..17dd369d5
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/logistic_quant8_signed.mod.py
@@ -0,0 +1,109 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import math
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{1, 2, 2, 1}, 0.00390625f, -128")
+model = model.Operation("LOGISTIC", i1).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [-128, -127, -126, -1]
+}
+
+output0 = {
+ i3: # output 0
+ [0, 31, 59, 127]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+d0 = 1 #2
+d1 = 16 #256
+d2 = 16 #256
+d3 = 1 #2
+
+i0 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d, %d, %d, %d}, .5f, -128" % (d0, d1, d2, d3))
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d, %d, %d, %d}, 0.00390625f, -128" % (d0, d1, d2, d3))
+model = model.Operation("LOGISTIC", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r=rng: [x % 256 for x in range(r)])()
+output_values = [
+ 255 if 1. / (1. + math.exp(-x * .5)) * 256 > 255 else int(
+ round(1. / (1. + math.exp(-x * .5)) * 256)) for x in input_values
+]
+
+input0 = {i0: [val - 128 for val in input_values]}
+output0 = {output: [val - 128 for val in output_values]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+# Zero-sized input test
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}",
+ [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3,
+ -1, 0, 0.4, 1.0,
+ 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4,
+ layout).To(zero_sized)
+
+# LOGISTIC op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("LOGISTIC", zero_sized).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0 / 256, -128)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(
+ quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/max_pool_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/max_pool_quant8_signed.mod.py
new file mode 100644
index 000000000..dde99c81a
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/max_pool_quant8_signed.mod.py
@@ -0,0 +1,301 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # input 0
+cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # output 0
+model = model.Operation("MAX_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(i3)
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+output0 = {i3: # output 0
+ [-127, -126, -125, -124]}
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+bat = 5
+row = 50
+col = 70
+chn = 3
+
+i0 = Input("i0", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 0.5f, -128" % (bat, row, col, chn))
+
+std = 20
+flt = 20
+pad = 0
+
+stride = Int32Scalar("stride", std)
+filt = Int32Scalar("filter", flt)
+padding = Int32Scalar("padding", pad)
+act0 = Int32Scalar("activation", 0)
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d, %d, %d, %d}, 0.5f, -128" % (bat, output_row, output_col, chn))
+
+model = model.Operation(
+ "MAX_POOL_2D", i0, padding, padding, padding, padding, stride, stride, filt, filt, act0).To(output)
+
+# Example 1. Input in operand 0
+input_range = bat * row * col * chn
+input_values = (lambda s = std, r = input_range: [x % s + 1 for x in range(r)])()
+output_range = bat * output_row * output_col * chn
+output_values = (lambda s = std, r = output_range: [ s for _ in range(r)])()
+input0 = {i0: [val - 128 for val in input_values]}
+output0 = {output: [val - 128 for val in output_values]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+bat = 5
+row = 50
+col = 70
+chn = 3
+
+i0 = Input("i0", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 0.5f, -128" % (bat, row, col, chn))
+
+std = 20
+flt = 20
+pad = 0
+
+stride = Int32Scalar("stride", std)
+filt = Int32Scalar("filter", flt)
+padding = Int32Scalar("padding", pad)
+act2 = Int32Scalar("relu1_activation", 2)
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d, %d, %d, %d}, 0.5f, -128" % (bat, output_row, output_col, chn))
+
+model = model.Operation(
+ "MAX_POOL_2D", i0, padding, padding, padding, padding, stride, stride, filt, filt, act2).To(output)
+
+# Example 1. Input in operand 0
+input_range = bat * row * col * chn
+input_values = (lambda s = std, r = input_range: [x % s + 1 for x in range(r)])()
+output_range = bat * output_row * output_col * chn
+output_values = (lambda r = output_range: [ 2 for _ in range(r)])()
+
+input0 = {i0: [val - 128 for val in input_values]}
+output0 = {output: [val - 128 for val in output_values]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 4, 1}, 0.0625f, -128") # input 0
+cons2 = Int32Scalar("cons2", 2)
+pad_same = Int32Scalar("pad_same", 1)
+act_none = Int32Scalar("act_none", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 2, 1}, 0.0625f, -128") # output 0
+model = model.Operation("MAX_POOL_2D", i1, pad_same, cons2, cons2, cons2, cons2, act_none).To(i3)
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -32, -96, -64, -80, -96, 32, -16]}
+output0 = {i3: # output 0
+ [-32, 32]}
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: MAX_POOL_2D_NCHW_1, pad = 0, stride = 1, filter = 1, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("MAX_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 2.0, 3.0, 4.0],
+ o1: [1.0, 2.0, 3.0, 4.0]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# MAX_POOL_2D_NCHW_2, act = none
+
+bat = 5
+row = 50
+col = 70
+chn = 3
+std = 20
+flt = 20
+pad = 0
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i2 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o2 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("MAX_POOL_2D", i2, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [x % std + 1 for x in range(bat * row * col * chn)],
+ o2: [std for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# MAX_POOL_2D_NCHW_3, act = relu6
+
+bat = 5
+row = 50
+col = 70
+chn = 3
+std = 20
+flt = 20
+pad = 0
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i3 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o3 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("MAX_POOL_2D", i3, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [x % std + 1 for x in range(bat * row * col * chn)],
+ o3: [6 for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# MAX_POOL_2D_NCHW_4, pad = same, stride = 2, filter = 2, act = none
+
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")
+Model().Operation("MAX_POOL_2D", i4, 1, 2, 2, 2, 2, 0, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [0, 6, 2, 4, 3, 2, 10, 7],
+ o4: [6, 10]
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MAX_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("MAX_POOL_2D", zero_sized, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MAX_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("MAX_POOL_2D", zero_sized, 1, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
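
All of the full-size pooling cases in this file derive their output shapes from the standard explicit-padding formula, (in + 2 * pad - filter + stride) // stride, evaluated per spatial axis. For the 50x70 inputs used above:

row, col, pad, flt, std = 50, 70, 0, 20, 20
print((row + 2 * pad - flt + std) // std)  # 2 output rows
print((col + 2 * pad - flt + std) // std)  # 3 output columns
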
diff --git a/nn/runtime/test/specs/V1_3/maximum_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/maximum_quant8_signed.mod.py
new file mode 100644
index 000000000..7856fcd0a
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/maximum_quant8_signed.mod.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("MAXIMUM", input0, input1).To(output0)
+
+ quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ input1: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -28],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -48],
+ })
+
+ Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name).AddVariations(quant8_signed, includeDefault=False)
+
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, 11.0, -2.0, -1.44],
+ input1_data=[-1.0, 0.0, 1.0, 12.0, -3.0, -1.43],
+ output_data=[1.0, 0.0, 1.0, 12.0, -2.0, -1.43],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, -2.0, -1.44, 11.0],
+ input1_data=[0.5, 2.0],
+ output_data=[1.0, 2.0, 0.5, 2.0, 0.5, 11.0],
+)
+
+
+# Test overflow and underflow.
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0f, 0")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0f, 0")
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 0.5f, 0")
+model = Model().Operation("MAXIMUM", input0, input1).To(output0)
+
+Example({
+ input0: [-68, 0],
+ input1: [0, 72],
+ output0: [0, 127],
+}, model=model, name="overflow")
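
The overflow example relies on the output scale (0.5) being finer than the input scale (1.0): the element-wise maxima are 0 and 72 in real terms, and 72 / 0.5 = 144 saturates to the int8 ceiling. A sketch of that requantization (plain Python, not the runtime's code):

def requantize(real, scale, zero_point):
    q = int(round(real / scale)) + zero_point
    return max(-128, min(127, q))               # saturate to int8

maxima = [max(-68, 0), max(0, 72)]              # real values: scale 1.0, zp 0
print([requantize(m, 0.5, 0) for m in maxima])  # [0, 127]
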
diff --git a/nn/runtime/test/specs/V1_3/mean_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/mean_quant8_signed.mod.py
new file mode 100644
index 000000000..606a1bc31
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/mean_quant8_signed.mod.py
@@ -0,0 +1,60 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 3, 2}, 0.8, -123")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 0.8, -123")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+input0 = {
+ i1: [
+ -127, -126, -125, -124, -123, -122, -121, -120, -119, -118, -117, -116,
+ -115, -114, -113, -112, -111, -110, -109, -108, -107, -106, -105, -104
+ ]
+}
+
+output0 = {
+ output: # output 0
+ [-116, -115]
+}
+
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 3, 2}, 0.8, -123")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 1}, 0.8, -123")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+input0 = {
+ i1: [
+ -127, -126, -125, -124, -123, -122, -121, -120, -119, -118, -117, -116,
+ -115, -114, -113, -112, -111, -110, -109, -108, -107, -106, -105, -104
+ ]
+}
+
+output0 = {
+ output: # output 0
+ [-118, -116, -114]
+}
+
+Example((input0, output0))
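
Since input and output share scale 0.8 and zero point -123, MEAN reduces to averaging the raw quantized values. In the first example the axes [1, 0, -3, -3] collapse dimensions 0 and 1 (duplicate axes are permitted), leaving one mean per channel of the {4, 3, 2} tensor:

values = list(range(-127, -103))       # the 24 input values above
ch0, ch1 = values[0::2], values[1::2]  # the two channels
print(sum(ch0) // 12, sum(ch1) // 12)  # -116 -115, matching output0
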
diff --git a/nn/runtime/test/specs/V1_3/minimum_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/minimum_quant8_signed.mod.py
new file mode 100644
index 000000000..31a3e3692
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/minimum_quant8_signed.mod.py
@@ -0,0 +1,63 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("MINIMUM", input0, input1).To(output0)
+
+ quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ input1: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -28],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -48],
+ })
+
+ Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name).AddVariations(quant8_signed, includeDefault=False)
+
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, 11.0, -2.0, -1.44],
+ input1_data=[-1.0, 0.0, 1.0, 12.0, -3.0, -1.43],
+ output_data=[-1.0, 0.0, -1.0, 11.0, -3.0, -1.44],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, -2.0, -1.44, 11.0],
+ input1_data=[0.5, 2.0],
+ output_data=[0.5, 0.0, -1.0, -2.0, -1.44, 2.0],
+)
+
+# Test overflow and underflow.
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0f, 0")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0f, 0")
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 0.5f, 0")
+model = Model().Operation("MINIMUM", input0, input1).To(output0)
+
+Example({
+ input0: [-68, 0],
+ input1: [0, 72],
+ output0: [-128, 0],
+}, model=model, name="overflow")
diff --git a/nn/runtime/test/specs/V1_3/mul_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/mul_quant8_signed.mod.py
new file mode 100644
index 000000000..01b3d444e
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/mul_quant8_signed.mod.py
@@ -0,0 +1,96 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2}, 1.0, -128")
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, -128")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 2.0, -128")
+model = model.Operation("MUL", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-126, -124],
+ i2: # input 1
+ [-127, -126, -124, -120]}
+
+output0 = {i3: # output 0
+ [-127, -124, -124, -112]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 2.0, -128")
+model = model.Operation("MUL", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126],
+ i2: # input 1
+ [-126, -124]}
+
+output0 = {i3: # output 0
+ [-127, -124]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MUL op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # constant multiplier
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("MUL", zero_sized, i2, 0).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1, 2],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
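
In the first MUL example both inputs use scale 1.0 with zero point -128 and the output uses scale 2.0, so each output element is round(real0 * real1 / 2.0) - 128, with the {1, 2} input broadcast across {2, 2}. A short check (plain Python):

def dq(q):                    # dequantize: scale 1.0, zero point -128
    return float(q + 128)

def rq(real):                 # requantize: scale 2.0, zero point -128
    return int(round(real / 2.0)) - 128

a = [-126, -124, -126, -124]  # the {1, 2} input broadcast over {2, 2}
b = [-127, -126, -124, -120]
print([rq(dq(x) * dq(y)) for x, y in zip(a, b)])  # [-127, -124, -124, -112]
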
diff --git a/nn/runtime/test/specs/V1_3/not_equal_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/not_equal_quant8_signed.mod.py
new file mode 100644
index 000000000..285c41d09
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/not_equal_quant8_signed.mod.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("NOT_EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 2.0, 0)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[1], # effectively 2
+ output_data=[True, False, True],
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [3], 1.0, 0)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.0, 1)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[1, 2, 3], # effectively 1, 2, 3
+ input1_data=[3], # effectively 2
+ output_data=[True, False, True],
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[-128],
+ input1_data=[72],
+ output_data=[True],
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.49725, 112)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM_SIGNED", [1], 1.64771, -97)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[72],
+ input1_data=[-128],
+ output_data=[True],
+)
diff --git a/nn/runtime/test/specs/V1_3/pad_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/pad_quant8_signed.mod.py
new file mode 100644
index 000000000..b478722a3
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/pad_quant8_signed.mod.py
@@ -0,0 +1,160 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import numpy as np
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 1, 2, 3}")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [1, 2,
+ 3, 4,
+ 3, 3,
+ 2, 1])
+output0 = Output("output0", "TENSOR_FLOAT32", "{4, 8, 8, 6}")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.3, -128),
+ output0: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.3, -128),
+})
+
+Example({
+ input0: [1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0],
+ output0: np.pad([[[[1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0]]]],
+ [[1, 2],
+ [3, 4],
+ [3, 3],
+ [2, 1]],
+ "constant").flatten().tolist(),
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 2.3, -128")
+paddings = Parameter("paddings", "TENSOR_INT32", "{1, 2}", [3, 1])
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{7}, 2.3, -128")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [-127, -126, -125],
+ output0: [-128, -128, -128, -127, -126, -125, -128],
+})
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 1}, 2.3, -128")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [0, 0,
+ 0, 2,
+ 1, 3,
+ 0, 0])
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 7, 1}, 2.3, -128")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [-127, -126, -125,
+ -124, -123, -122],
+ output0: [-128, -127, -126, -125, -128, -128, -128,
+ -128, -124, -123, -122, -128, -128, -128,
+ -128, -128, -128, -128, -128, -128, -128,
+ -128, -128, -128, -128, -128, -128, -128],
+})
+
+#######################################################
+
+# Quantized PAD with non-zero zeroPoint is supported since 1.2.
+# See http://b/132112227.
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 1}, 2.3, -119")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [0, 0,
+ 0, 2,
+ 1, 3,
+ 0, 0])
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 7, 1}, 2.3, -119")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [-127, -126, -125,
+ -124, -123, -122],
+ output0: [-119, -127, -126, -125, -119, -119, -119,
+ -119, -124, -123, -122, -119, -119, -119,
+ -119, -119, -119, -119, -119, -119, -119,
+ -119, -119, -119, -119, -119, -119, -119],
+})
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 3, 1}, 2.3, -124")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [0, 0,
+ 0, 2,
+ 1, 3,
+ 0, 0])
+pad_value = Int32Scalar("pad_value", 9)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 7, 1}, 2.3, -124")
+
+model = Model().Operation("PAD_V2", input0, paddings, pad_value).To(output0)
+
+Example(({
+ input0: [-127, -126, -125,
+ -124, -123, -122],
+}, {
+ output0: [9, -127, -126, -125, 9, 9, 9,
+ 9, -124, -123, -122, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9],
+}))
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 2, 3}, 2.3, -124")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [1, 2,
+ 3, 4,
+ 3, 3,
+ 2, 1])
+pad_value = Int32Scalar("pad_value", -125)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 8, 8, 6}, 2.3, -124")
+
+model = Model().Operation("PAD_V2", input0, paddings, pad_value).To(output0)
+
+Example({
+ input0: [-127, -126, -125,
+ -124, -123, -122],
+ output0: np.pad([[[[-127, -126, -125],
+ [-124, -123, -122]]]],
+ [[1, 2],
+ [3, 4],
+ [3, 3],
+ [2, 1]],
+ "constant",
+ constant_values=-125).flatten().tolist(),
+})
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 2.3, -124")
+paddings = Parameter("paddings", "TENSOR_INT32", "{1, 2}", [3, 1])
+pad_value = Int32Scalar("pad_value", 9)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{7}, 2.3, -124")
+
+model = Model().Operation("PAD_V2", input0, paddings, pad_value).To(output0)
+
+Example({
+ input0: [-127, -126, -125],
+ output0: [9, 9, 9, -127, -126, -125, 9],
+})
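
For quantized tensors the PAD_V2 pad_value is given in quantized units rather than as a real number: 9 at scale 2.3 with zero point -124 stands for 2.3 * (9 + 124) = 305.9. The last expected output above can be reproduced directly with numpy:

import numpy as np

padded = np.pad([-127, -126, -125], (3, 1), "constant", constant_values=9)
print(padded.tolist())  # [9, 9, 9, -127, -126, -125, 9]
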
diff --git a/nn/runtime/test/specs/V1_3/prelu_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/prelu_quant8_signed.mod.py
new file mode 100644
index 000000000..f603e192d
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/prelu_quant8_signed.mod.py
@@ -0,0 +1,60 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
+a1 = Parameter("alpha", "TENSOR_FLOAT32", "{1, 1, 3}", [0, 1, 2])
+o1 = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
+Model().Operation("PRELU", i1, a1).To(o1)
+
+# output.scale > input.scale && output.scale > input.scale * alpha.scale
+quant8_signed_gt = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ a1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -78),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -8)
+})
+
+# output.scale == input.scale
+quant8_signed_eq1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ a1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -78),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -8)
+})
+
+# output.scale == input.scale * alpha.scale
+quant8_signed_eq2 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ a1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -78),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -8)
+})
+
+# output.scale < input.scale && output.scale < input.scale * alpha.scale
+quant8_signed_lt = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ a1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -78),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -8)
+})
+
+# Instantiate an example
+Example({
+ i1: [ 0, 0, 0,
+ 1, 1, 1,
+ -1, -1, -1,
+ -2, -2, -2],
+ o1: [ 0, 0, 0,
+ 1, 1, 1,
+ 0, -1, -2,
+ 0, -2, -4]
+}).AddVariations(quant8_signed_gt, quant8_signed_eq1, quant8_signed_eq2, quant8_signed_lt, includeDefault=False)
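
The four converter variants exercise every ordering of output scale versus input.scale and input.scale * alpha.scale, since a quantized PRELU rescales positive values by input.scale / output.scale and negative ones by input.scale * alpha.scale / output.scale. For the quant8_signed_gt variant, the element with real input -1.0 and alpha 2.0 works out as follows (plain Python sketch):

def quantize(real, scale, zero_point):
    return int(round(real / scale)) + zero_point

x, alpha = -1.0, 2.0
y = x if x >= 0 else alpha * x  # PRELU in the real domain: -2.0
print(quantize(y, 0.5, -8))     # -12, the expected quantized output
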
diff --git a/nn/runtime/test/specs/V1_3/quantize_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/quantize_quant8_signed.mod.py
new file mode 100644
index 000000000..949df492f
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/quantize_quant8_signed.mod.py
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import numpy as np
+
+num_values = 300
+values = list(np.linspace(-10, 10, num_values))
+
+for input_type in ["TENSOR_FLOAT32", "TENSOR_FLOAT16"]:
+ for scale, offset in [(1.0, -128),
+ (1.0, -127),
+ (0.01, -8),
+ (10.0, -8)]:
+ input0 = Input("input0", input_type, "{%d}" % num_values)
+ output0 = Output("output0", input_type, "{%d}" % num_values)
+
+ model = Model().Operation("QUANTIZE", input0).To(output0)
+
+ quantizeOutput = DataTypeConverter().Identify({
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", scale, offset],
+ })
+
+ Example({
+ input0: values,
+ output0: values,
+ }).AddVariations(quantizeOutput, includeDefault=False)
+
+
+# Zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# QUANTIZE op with numBatches = 0.
+o3 = Output("out", "TENSOR_QUANT8_ASYMM_SIGNED", "{0, 2, 2, 1}, 0.1f, 0") # out
+model = model.Operation("QUANTIZE", zero_sized).To(o3)
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations("relaxed", "float16")
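
Each variant applies q = clamp(round(x / scale) + zeroPoint, -128, 127); because the DataTypeConverter quantizes the expected output with the same parameters, inputs and outputs can share the `values` list. For the (0.01, -8) case:

def quantize(x, scale, zero_point):
    q = int(round(x / scale)) + zero_point
    return max(-128, min(127, q))  # saturate to int8

print(quantize(0.05, 0.01, -8))    # -3
print(quantize(10.0, 0.01, -8))    # 127, saturated
print(quantize(-10.0, 0.01, -8))   # -128, saturated
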
diff --git a/nn/runtime/test/specs/V1_3/reduce_max_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/reduce_max_quant8_signed.mod.py
new file mode 100644
index 000000000..668ea6e0b
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/reduce_max_quant8_signed.mod.py
@@ -0,0 +1,70 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+ model = Model().Operation("REDUCE_MAX", input0, axes, keep_dims).To(output0)
+ quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ })
+ Example({
+ input0: input_data,
+ output0: output_data,
+ }, model=model).AddVariations(quant8_signed, includeDefault=False)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+ input_data=[-1, -2,
+ 3, 4,
+ 5, -6],
+ axes=[-1],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+ output_data=[-1, 4, 5],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+ input_data=[9.527],
+ axes=[0],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+ output_data=[9.527],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[1, 0, -3, -3],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+ output_data=[2.3, 2.4],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[0, 2],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+ output_data=[2.0, 2.2, 2.4],
+)
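
Negative axis values count from the back and repeated axes are ignored, so axes=[1, 0, -3, -3] reduces over dimensions 0 and 1 only. The expected outputs agree with a plain numpy reduction over the deduplicated axes:

import numpy as np

data = (np.arange(24) * 0.1 + 0.1).reshape(4, 3, 2)
axes = [1, 0, -3, -3]
unique = tuple({a % data.ndim for a in axes})  # {0, 1}
print(data.max(axis=unique))                   # [2.3 2.4] (up to float error)
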
diff --git a/nn/runtime/test/specs/V1_3/reduce_min_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/reduce_min_quant8_signed.mod.py
new file mode 100644
index 000000000..bf518349f
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/reduce_min_quant8_signed.mod.py
@@ -0,0 +1,70 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+ model = Model().Operation("REDUCE_MIN", input0, axes, keep_dims).To(output0)
+ quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ })
+ Example({
+ input0: input_data,
+ output0: output_data,
+ }, model=model).AddVariations(quant8_signed, includeDefault=False)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+ input_data=[-1, -2,
+ 3, 4,
+ 5, -6],
+ axes=[-1],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+ output_data=[-2, 3, -6],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+ input_data=[9.527],
+ axes=[0],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+ output_data=[9.527],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[1, 0, -3, -3],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+ output_data=[0.1, 0.2],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[0, 2],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+ output_data=[0.1, 0.3, 0.5],
+)
diff --git a/nn/runtime/test/specs/V1_3/relu1_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/relu1_quant8_signed.mod.py
new file mode 100644
index 000000000..40a48c371
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/relu1_quant8_signed.mod.py
@@ -0,0 +1,104 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # input 0
+o = Output("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # output 0
+model = model.Operation("RELU1", i1).To(o)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -127, -126, -125]}
+output0 = {o: # output 0
+ [-128, -127, -126, -126]}
+
+# Instantiate one example
+Example((input0, output0))
+
+#######################################################
+
+# Example 2. Input in operand 0,
+input1 = {i1: # input 0
+ [-124, -118, -28, 127]}
+output1 = {o: # output 0
+ [-126, -126, -126, -126]}
+
+# Instantiate another example
+Example((input1, output1))
+
+#######################################################
+
+model = Model()
+
+d0 = 2
+d1 = 64
+d2 = 64
+d3 = 2
+
+i0 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 1.f, 0" % (d0, d1, d2, d3))
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 1.f, 0" % (d0, d1, d2, d3))
+
+model = model.Operation("RELU1", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x % 256 for x in range(r)])()
+output_values = [127 if x < 127 else 129 if x > 129 else x for x in input_values]
+
+input0 = {i0: [value - 128 for value in input_values]}
+output0 = {output: [value - 128 for value in output_values]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RELU1 op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("RELU1", zero_sized).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
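
With scale 0.5 and zero point -128, the RELU1 bounds of +/-1.0 fall at quantized values -130 and -126; the lower bound lies below the int8 minimum, so only the upper clamp at -126 is observable, which is what both examples above encode. In quantized terms:

def relu1_q(q, scale=0.5, zero_point=-128):
    x = scale * (q - zero_point)               # dequantize
    y = max(-1.0, min(1.0, x))                 # RELU1 in the real domain
    return int(round(y / scale)) + zero_point  # requantize

print([relu1_q(q) for q in [-128, -127, -126, -125]])  # [-128, -127, -126, -126]
print([relu1_q(q) for q in [-124, -118, -28, 127]])    # four times -126
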
diff --git a/nn/runtime/test/specs/V1_3/relu6_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/relu6_quant8_signed.mod.py
new file mode 100644
index 000000000..eb3407135
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/relu6_quant8_signed.mod.py
@@ -0,0 +1,103 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # input 0
+i2 = Output("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # output 0
+model = model.Operation("RELU6", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -127, -117, -116]}
+output0 = {i2: # output 0
+ [-128, -127, -117, -116]}
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+# Example 2. Input in operand 0,
+input1 = {i1: # input 0
+ [-115, -114, 126, 127]}
+output1 = {i2: # output 0
+ [-116, -116, -116, -116]}
+# Instantiate an example
+Example((input1, output1))
+
+#######################################################
+
+model = Model()
+
+d0 = 2
+d1 = 128
+d2 = 20
+d3 = 2
+
+i0 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 1.f, 0" % (d0, d1, d2, d3))
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 1.f, 0" % (d0, d1, d2, d3))
+
+model = model.Operation("RELU6", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x % 256 for x in range(r)])()
+output_values = [128 if x < 128 else 134 if x > 134 else x for x in input_values]
+
+input0 = {i0: [value - 128 for value in input_values]}
+output0 = {output: [value - 128 for value in output_values]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RELU6 op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("RELU6", zero_sized).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/relu_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/relu_quant8_signed.mod.py
new file mode 100644
index 000000000..450933bb1
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/relu_quant8_signed.mod.py
@@ -0,0 +1,106 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+# input 0
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, 0")
+# output 0
+o = Output("op2", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 1.f, 0")
+model = model.Operation("RELU", i1).To(o)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-128, -127, -2, -1]}
+output0 = {o: # output 0
+ [0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+# Example 2. Input in operand 0,
+input1 = {i1: # input 0
+ [0, 1, 126, 127]}
+output1 = {o: # output 0
+ [0, 1, 126, 127]}
+
+# Instantiate another example
+Example((input1, output1))
+
+#######################################################
+
+model = Model()
+
+d0 = 2
+d1 = 32
+d2 = 60
+d3 = 2
+
+i0 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 1.f, 0" % (d0, d1, d2, d3))
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 1.f, 0" % (d0, d1, d2, d3))
+
+model = model.Operation("RELU", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x % 256 for x in range(r)])()
+output_values = (lambda r = rng: [x % 256 if x % 256 > 128 else 128 for x in range(r)])()
+
+input0 = {i0: [value - 128 for value in input_values]}
+output0 = {output: [value - 128 for value in output_values]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RELU op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("RELU", zero_sized).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/reshape_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/reshape_quant8_signed.mod.py
new file mode 100644
index 000000000..efbb7ba19
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/reshape_quant8_signed.mod.py
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+# a line of 3 pixels, 3 components/pixel
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 3, 3}, 1.f, -128")
+i2 = Parameter("op2", "TENSOR_INT32", "{1}", [-1])
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{9}, 1.f, -128")
+model = model.Operation("RESHAPE", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125,
+ -124, -123, -122,
+ -121, -120, -119]}
+
+output0 = {i3: # output 0
+ [-127, -126, -125, -124, -123, -122, -121, -120, -119]}
+
+# Instantiate an example
+Example((input0, output0))
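+
+# Sketch of the shape inference exercised above (plain Python 3): a -1 in the
+# shape operand is computed from the remaining element count, so reshaping
+# {1, 1, 3, 3} with [-1] flattens to 9 elements.
+def _infer_reshape(src_shape, new_shape):
+    total = 1
+    for d in src_shape:
+        total *= d
+    known = 1
+    for d in new_shape:
+        if d != -1:
+            known *= d
+    return [total // known if d == -1 else d for d in new_shape]
+
+assert _infer_reshape([1, 1, 3, 3], [-1]) == [9]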
diff --git a/nn/runtime/test/specs/V1_3/resize_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/resize_quant8_signed.mod.py
new file mode 100644
index 000000000..7ec0ee23d
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/resize_quant8_signed.mod.py
@@ -0,0 +1,412 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# RESIZE_BILINEAR_NCHW_1, w = 3, h = 3
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+model_shape = Model("shape").Operation("RESIZE_BILINEAR", i1, 3, 3, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_BILINEAR", i1, 1.5, 1.5, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128)
+})
+
+test1 = {
+ i1: [1.0, 1.0, 2.0, 2.0],
+ o1: [1.0, 1.0, 1.0,
+ 1.666666667, 1.666666667, 1.666666667,
+ 2.0, 2.0, 2.0]
+}
+
+# Instantiate an example
+Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
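+
+# Sketch of the sampling behind the 1.666... row above (an assumption derived
+# from the expected values, not lifted from the runtime): the source
+# coordinate is out_y * in_h / out_h with edge clamping, interpolated linearly.
+def _resize_bilinear_1d(rows, out_h):
+    in_h = len(rows)
+    out = []
+    for oy in range(out_h):
+        y = oy * in_h / out_h
+        y0 = min(int(y), in_h - 1)
+        y1 = min(y0 + 1, in_h - 1)
+        out.append(rows[y0] + (y - y0) * (rows[y1] - rows[y0]))
+    return out
+
+assert [round(v, 6) for v in _resize_bilinear_1d([1.0, 2.0], 3)] == [1.0, 1.666667, 2.0]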
+
+#######################################################
+# RESIZE_BILINEAR_NCHW_2, w = 3, h = 3
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+model_shape = Model("shape").Operation("RESIZE_BILINEAR", i2, 3, 3, layout).To(o2)
+model_scale = Model("scale").Operation("RESIZE_BILINEAR", i2, 1.6, 1.6, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
+})
+
+test2 = {
+ i2: [3, 4, 6, 10, 9, 10, 12, 16],
+ o2: [3, 4, 5, 8, 6, 10,
+ 7, 8, 9, 12, 10, 14,
+ 9, 10, 11, 14, 12, 16,]
+}
+
+# Instantiate an example
+Example(test2, model=model_shape).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test2, model=model_scale).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_BILINEAR, w = 3, h = 3
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+model_shape = Model("shape").Operation("RESIZE_BILINEAR", i3, 3, 3).To(o3)
+model_scale = Model("scale").Operation("RESIZE_BILINEAR", i3, 1.8, 1.8).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128)
+})
+
+test3 = {
+ i3: [1.0, 1.0, 2.0, 2.0],
+ o3: [1.0, 1.0, 1.0,
+ 1.666666667, 1.666666667, 1.666666667,
+ 2.0, 2.0, 2.0]
+}
+
+# Instantiate an example
+Example(test3, model=model_shape).AddVariations(quant8_signed, includeDefault=False)
+Example(test3, model=model_scale).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, resize by output shape
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RESIZE_BILINEAR op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("RESIZE_BILINEAR", zero_sized, 3, 3, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, resize by scale
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RESIZE_BILINEAR op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("RESIZE_BILINEAR", zero_sized, 1.6, 1.6, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+layout = BoolScalar("layout", False) # NHWC
+
+# RESIZE_NEAREST_NEIGHBOR_1, w = 1, h = 1
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 1, 1, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1, 1, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 0.5, 0.5, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0)
+})
+
+test1 = {
+ i1: [1, 2, 3, 4],
+ o1: [1]
+}
+
+Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_2, w = 3, h = 3
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 3, 3, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1.5, 1.5, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
+})
+
+test2 = {
+ i1: [1, 2, 3, 4],
+ o1: [1, 1, 2, 1, 1, 2, 3, 3, 4]
+}
+
+Example(test2, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test2, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
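+
+# Sketch of the index rule implied by the expected data above (an assumption,
+# matching floor scaling with edge clamping): source = floor(out * in / out_dim).
+def _nn_index(o, in_dim, out_dim):
+    return min(int(o * in_dim / out_dim), in_dim - 1)
+
+# For a 2 -> 3 upscale this yields [0, 0, 1], giving the repeated rows above.
+assert [_nn_index(o, 2, 3) for o in range(3)] == [0, 0, 1]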
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_3, w = 2, h = 2
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 2, 2, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 0.8, 0.8, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28)
+})
+
+test3 = {
+ i1: [1, 2, 3, 4, 5, 6, 7, 8, 9],
+ o1: [1, 2, 4, 5]
+}
+
+Example(test3, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test3, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_4, w = 5, h = 2
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 2, 5, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 5, 2, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 2.6, 1.1, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28)
+})
+
+test4 = {
+ i1: [1, 2, 3, 4],
+ o1: [1, 1, 1, 2, 2, 3, 3, 3, 4, 4]
+}
+
+Example(test4, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test4, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_5, w = 3, h = 3
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 3, 3, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 0.9, 0.9, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28)
+})
+
+test5 = {
+ i1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ o1: [1, 2, 3, 5, 6, 7, 9, 10, 11]
+}
+
+Example(test5, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test5, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_6, w = 2, h = 5
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 5, 2, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 2, 5, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1.4, 2.8, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28)
+})
+
+test6 = {
+ i1: [1, 2, 3, 4],
+ o1: [1, 2, 1, 2, 1, 2, 3, 4, 3, 4]
+}
+
+Example(test6, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test6, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_7, w = 4, h = 4
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 4, 4, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 2.0, 2.0, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28)
+})
+
+test7 = {
+ i1: [1, 2, 3, 4],
+ o1: [1, 1, 2, 2, 1, 1, 2, 2, 3, 3, 4, 4, 3, 3, 4, 4]
+}
+
+Example(test7, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test7, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# RESIZE_NEAREST_NEIGHBOR_8, w = 3, h = 3
+i1 = Input("in", "TENSOR_FLOAT32", "{2, 2, 2, 2}") # input 0
+o1 = Output("out", "TENSOR_FLOAT32", "{2, 3, 3, 2}") # output 0
+model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 3, 3, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1.6, 1.8, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28)
+})
+
+test8 = {
+ i1: [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8],
+ o1: [1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2,
+ 3, 3, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6,
+ 5, 5, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8]
+}
+
+Example(test8, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+Example(test8, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, resize by output shape
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RESIZE_NEAREST_NEIGHBOR op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("RESIZE_NEAREST_NEIGHBOR", zero_sized, 3, 3, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, resize by scale
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RESIZE_NEAREST_NEIGHBOR op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("RESIZE_NEAREST_NEIGHBOR", zero_sized, 1.6, 1.6, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/roi_align_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/roi_align_quant8_signed.mod.py
new file mode 100644
index 000000000..024977c55
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/roi_align_quant8_signed.mod.py
@@ -0,0 +1,264 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# ROI_ALIGN_1, outputShape = [2, 2], spatialScale = [0.5, 0.5], samplingRatio = [4, 4]
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+roi1 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o1 = Output("out", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+Model().Operation("ROI_ALIGN", i1, roi1, [0, 0, 0, 0], 2, 2, 2.0, 2.0, 4, 4, layout).To(o1)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ roi1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0625, 0)
+})
+
+# Instantiate an example
+Example({
+ i1: [
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5
+ ],
+ roi1: [
+ 2, 2, 4, 4,
+ 0, 0, 8, 8,
+ 2, 0, 4, 8,
+ 0, 2, 8, 4
+ ],
+ o1: [
+ 0.375, 5.125, -0.375, 2.875,
+ -0.5, -0.3125, 3.1875, 1.125,
+ 0.25, 4.25, 4.875, 0.625,
+ -0.1875, 1.125, 0.9375, -2.625
+ ]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
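+
+# Quick check of the converter parameters above (plain Python 3): affine
+# quantization is q = round(real / scale) + zeroPoint, so an output scale of
+# 0.0625 represents 0.375 exactly (q = 6) and keeps the expected data lossless.
+def _quantize(value, scale, zero_point, lo=-128, hi=127):
+    return min(hi, max(lo, int(round(value / scale)) + zero_point))
+
+assert _quantize(0.375, 0.0625, 0) == 6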
+
+#######################################################
+# ROI_ALIGN_2, outputShape = [2, 3], spatialScale = [0.25, 0.25], samplingRatio = [4, 4]
+i2 = Input("in", "TENSOR_FLOAT32", "{4, 4, 8, 2}")
+roi2 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o2 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_ALIGN", i2, roi2, [0, 0, 3, 3], 2, 3, 4.0, 4.0, 4, 4, layout).To(o2)
+
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.04, -128),
+ roi2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.03125, -118)
+})
+
+# Instantiate an example
+Example({
+ i2: [
+ 8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+ 7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+ 5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+ 2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+ 0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+ 2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+ 9.01, 0.95, 4.07, 0.65,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+ 5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+ 1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+ 1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+ 6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+ 4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+ 8.06, 4.80, 7.80, 5.43
+ ],
+ roi2: [
+ 4, 4, 28, 12,
+ 4, 4, 32, 16,
+ 7, 1, 29, 15, # test rounding
+ 1, 7, 9, 11 # test roi with shape smaller than output
+ ],
+ o2: [
+ 5.150000, 5.491250, 4.733750, 7.100000, 4.827500,
+ 5.843750, 4.721250, 4.797500, 3.750000, 6.592500,
+ 5.452500, 3.362500,
+ 4.899396, 5.861696, 4.941504, 5.979741, 3.182904,
+ 6.111551, 5.141833, 4.631891, 3.903325, 4.627793,
+ 5.537240, 1.356019,
+ 4.845915, 3.618338, 3.301958, 6.250566, 2.930461,
+ 4.269676, 3.642174, 4.201423, 5.008657, 5.735293,
+ 7.426004, 4.819665,
+ 4.518229, 6.887344, 2.952656, 5.565781, 3.952786,
+ 2.552812, 5.191667, 6.854167, 3.920000, 6.512500,
+ 4.886250, 5.497708
+ ]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# ROI_ALIGN_3, outputShape = [2, 3], spatialScale = [0.25, 0.25], samplingRatio = [0, 0]
+i3 = Input("in", "TENSOR_FLOAT32", "{2, 4, 8, 2}")
+roi3 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o3 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_ALIGN", i3, roi3, [0, 0, 1, 1], 2, 3, 4.0, 4.0, 0, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.04, -128),
+ roi3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.03125, -118)
+})
+
+# Instantiate an example
+Example({
+ i3: [
+ 8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+ 7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+ 5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+ 2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+ 0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+ 2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+ 9.01, 0.95, 4.07, 0.65,
+ 5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+ 5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+ 1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+ 1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+ 6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+ 4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+ 8.06, 4.80, 7.80, 5.43
+ ],
+ roi3: [
+ 4, 4, 28, 12,
+ 4, 4, 32, 16,
+ 7, 1, 29, 15, # test rounding
+ 1, 7, 9, 11 # test roi with shape smaller than output
+ ],
+ o3: [
+ 5.150000, 5.491250, 4.733750, 7.100000, 4.827500,
+ 5.843750, 4.721250, 4.797500, 3.750000, 6.592500,
+ 5.452500, 3.362500,
+ 4.869884, 5.908148, 4.941701, 5.955718, 3.113403,
+ 6.341898, 5.156389, 4.604016, 3.881782, 4.616123,
+ 5.690694, 1.237153,
+ 5.028047, 3.560944, 3.157656, 6.395469, 2.896243,
+ 4.336576, 3.563021, 4.057767, 5.053437, 6.028906,
+ 7.396966, 4.668906,
+ 4.385000, 6.905000, 2.815000, 5.502500, 4.161667,
+ 1.829167, 5.191667, 6.854167, 3.920000, 6.512500,
+ 5.106667, 5.612500
+ ]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# ROI_ALIGN_4, outputShape = [2, 2], spatialScale = [0.5, 1.0], samplingRatio = [0, 4]
+i4 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")
+roi4 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+o4 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
+Model().Operation("ROI_ALIGN", i4, roi4, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, 0, 4, layout).To(o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ roi4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0625, 0)
+})
+
+# Instantiate an example
+Example({
+ i4: [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ],
+ roi4: [
+ 1, 2, 2, 4,
+ 0, 0, 4, 8,
+ 1, 0, 2, 8,
+ 0, 2, 4, 4,
+ 0, 0, 0, 0
+ ],
+ o4: [
+ 0.375, 5.125, -0.375, 2.875,
+ -0.5, -0.3125, 3.1875, 1.125,
+ 0.25, 4.25, 4.875, 0.625,
+ -0.1875, 1.125, 0.9375, -2.625,
+ -7.4375, -3.3125, -6.8125, -3.4375
+ ]
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# ROI_ALIGN_zero_sized
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# ROI_ALIGN op with numRois = 0.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Output("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [0],
+ o1: [],
+ o2: [],
+ zero_sized: [],
+}).AddNchw(i1, zero_sized, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# ROI_ALIGN_6, regression test for a hanging issue
+i4 = Input("in", "TENSOR_FLOAT32", "{1, 512, 8, 1}")
+roi4 = Input("roi", "TENSOR_FLOAT32", "{1, 4}")
+o4 = Output("out", "TENSOR_FLOAT32", "{1, 128, 4, 1}")
+Model().Operation("ROI_ALIGN", i4, roi4, [0], 128, 4, 1.0, 64.0, 10, 10, layout).To(o4)
+
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ roi4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0625, 0)
+})
+
+# Instantiate an example
+Example({
+ i4: [0] * (512 * 8),
+ roi4: [450, 500, 466, 508],
+ o4: [0] * (128 * 4)
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/roi_pooling_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/roi_pooling_quant8_signed.mod.py
new file mode 100644
index 000000000..44047db06
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/roi_pooling_quant8_signed.mod.py
@@ -0,0 +1,152 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# ROI_POOLING_1, outputShape = [2, 2], spatialScale = [0.5, 0.5]
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+roi1 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+o1 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
+Model().Operation("ROI_POOLING", i1, roi1, [0, 0, 0, 0, 0], 2, 2, 2.0, 2.0, layout).To(o1)
+
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ roi1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0)
+})
+
+# Instantiate an example
+Example({
+ i1: [
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5
+ ],
+ roi1: [
+ 2, 2, 4, 4,
+ 0, 0, 6, 6,
+ 2, 0, 4, 6,
+ 0, 2, 6, 4,
+ 8, 8, 8, 8 # empty region
+ ],
+ o1: [
+ -2, 9, -2, 3,
+ -1, 9, 10, 5,
+ -1, 9, 10, 3,
+ -2, 9, 7, 3,
+ 0, 0, 0, 0
+ ]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
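+
+# Sketch of the pooling rule (plain Python 3; the exact bin boundaries are an
+# implementation detail not reproduced here): each output cell is the max over
+# its bin of input pixels, e.g. a bin holding -2 and 9 from row 1 above yields
+# 9, the second value of the first output roi.
+def _roi_pool_bin(values):
+    return max(values)
+
+assert _roi_pool_bin([-2, 9]) == 9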
+
+#######################################################
+# ROI_POOLING_2, outputShape = [2, 3], spatialScale = 0.25
+i2 = Input("in", "TENSOR_FLOAT32", "{4, 4, 8, 2}")
+roi2 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o2 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_POOLING", i2, roi2, [0, 0, 3, 3], 2, 3, 4.0, 4.0, layout).To(o2)
+
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.04, -128),
+ roi2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.04, -128)
+})
+
+# Instantiate an example
+Example({
+ i2: [
+ 8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+ 7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+ 5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+ 2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+ 0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+ 2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+ 9.01, 0.95, 4.07, 0.65,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+ 5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+ 1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+ 1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+ 6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+ 4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+ 8.06, 4.80, 7.80, 5.43
+ ],
+ roi2: [
+ 4, 4, 24, 8,
+ 4, 4, 28, 12,
+ 7, 1, 25, 11, # test rounding
+ 1, 7, 5, 11 # test roi with shape smaller than output
+ ],
+ o2: [
+ 6.16, 8.60, 7.12, 6.79, 5.13, 8.44, 7.86, 9.69, 4.42, 9.89, 9.30, 6.47,
+ 7.86, 9.89, 9.30, 9.89, 9.30, 9.50, 7.86, 9.89, 9.30, 9.89, 9.30, 9.50,
+ 9.50, 6.73, 9.50, 9.28, 6.89, 8.97, 6.18, 9.63, 9.99, 9.85, 9.99, 9.85,
+ 7.29, 6.94, 7.29, 6.94, 2.31, 6.88, 7.90, 6.78, 7.90, 6.82, 4.64, 6.82
+ ]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# ROI_POOLING_3, outputShape = [2, 2], spatialScale = [0.5, 1]
+i3 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")
+roi3 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+o3 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
+Model().Operation("ROI_POOLING", i3, roi3, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ roi3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0)
+})
+
+# Instantiate an example
+Example({
+ i3: [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ],
+ roi3: [
+ 1, 2, 2, 4,
+ 0, 0, 3, 6,
+ 1, 0, 2, 6,
+ 0, 2, 3, 4,
+ 0, 0, 0, 0
+ ],
+ o3: [
+ -2, 9, -2, 3,
+ -1, 9, 10, 5,
+ -1, 9, 10, 3,
+ -2, 9, 7, 3,
+ -10, -10, -10, -10
+ ]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/select_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/select_quant8_signed.mod.py
new file mode 100644
index 000000000..d4169c7ec
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/select_quant8_signed.mod.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, input2, output0, input0_data, input1_data, input2_data, output_data):
+ model = Model().Operation("SELECT", input0, input1, input2).To(output0)
+ quant8_signed = DataTypeConverter().Identify({
+ input1: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.5, 1],
+ input2: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0],
+ })
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ input2: input2_data,
+ output0: output_data,
+ }, model=model, name=name).AddVariations(quant8_signed, includeDefault=False)
+
+test(
+ name="one_dim",
+ input0=Input("input0", "TENSOR_BOOL8", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ input2=Input("input2", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+ input0_data=[True, False, True],
+ input1_data=[1, 2, 3],
+ input2_data=[4, 5, 6],
+ output_data=[1, 5, 3],
+)
+
+test(
+ name="two_dim",
+ input0=Input("input0", "TENSOR_BOOL8", "{2, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2, 2}"),
+ input2=Input("input2", "TENSOR_FLOAT32", "{2, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{2, 2}"),
+ input0_data=[False, True, False, True],
+ input1_data=[1, 2, 3, 4],
+ input2_data=[5, 6, 7, 8],
+ output_data=[5, 2, 7, 4],
+)
+
+test(
+ name="five_dim",
+ input0=Input("input0", "TENSOR_BOOL8", "{2, 1, 2, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
+ input2=Input("input2", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
+ input0_data=[True, False, True, False, True, False, True, False],
+ input1_data=[1, 2, 3, 4, 5, 6, 7, 8],
+ input2_data=[9, 10, 11, 12, 13, 14, 15, 16],
+ output_data=[1, 10, 3, 12, 5, 14, 7, 16],
+)
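+
+# One-line reference for the cases above (plain Python 3): SELECT chooses
+# elementwise from input1 where the mask is true, otherwise from input2.
+def _select(mask, a, b):
+    return [x if m else y for m, x, y in zip(mask, a, b)]
+
+assert _select([True, False, True], [1, 2, 3], [4, 5, 6]) == [1, 5, 3]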
diff --git a/nn/runtime/test/specs/V1_3/slice_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/slice_quant8_signed.mod.py
new file mode 100644
index 000000000..7fd40f728
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/slice_quant8_signed.mod.py
@@ -0,0 +1,80 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+inp = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 2, 3, 1}, 2.0, 0")
+inp_data = [
+ -127, -127, -127, -126, -126, -126, -125, -125, -125, -124, -124, -124,
+ -123, -123, -123, -122, -122, -122
+]
+begin = Input("begin", "TENSOR_INT32", "{4}")
+begin_data = [1, 0, 0, 0]
+size = Input("size", "TENSOR_INT32", "{4}")
+size_data = [2, 1, 3, 1]
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1, 3, 1}, 2.0, 0")
+output_data = [-125, -125, -125, -123, -123, -123]
+
+model = Model().Operation("SLICE", inp, begin, size).To(output)
+Example(
+ {
+ inp: inp_data,
+ begin: begin_data,
+ size: size_data,
+ output: output_data,
+ },
+ model=model)
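+
+# Sketch of the per-axis arithmetic above (plain Python 3): the output along
+# an axis starts at begin and spans size elements; a size of -1 (used in the
+# zero-sized test below) means "all remaining elements".
+def _slice_extent(length, begin, size):
+    return (begin, length - begin) if size == -1 else (begin, size)
+
+assert _slice_extent(3, 1, 2) == (1, 2)
+assert _slice_extent(2, 1, -1) == (1, 1)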
+
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}",
+ [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3,
+ -1, 0, 0.4, 1.0,
+ 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4,
+ layout).To(zero_sized)
+
+# SLICE op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("SLICE", zero_sized, [0, 1, 1, 0],
+ [-1, 1, -1, 1]).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/softmax_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/softmax_quant8_signed.mod.py
new file mode 100644
index 000000000..18e83a40b
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/softmax_quant8_signed.mod.py
@@ -0,0 +1,136 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+model = Model()
+
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4}, 0.5f, -128") # batch = 1, depth = 1
+beta = Float32Scalar("beta", 0.00001) # close to 0
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4}, 0.00390625f, -128")
+
+# model 1
+model = model.Operation("SOFTMAX", i1, beta).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: [-127, -126, -118, -108]}
+
+output0 = {output: [-64, -64, -64, -64]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 5}, 0.5f, -128") # batch = 2, depth = 5
+beta = Float32Scalar("beta", 1.)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 5}, 0.00390625f, -128")
+
+# model 1
+model = model.Operation("SOFTMAX", i1, beta).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1:
+ [-127, -126, -125, -124, -123,
+ 127, 126, 125, 124, 123]}
+
+output0 = {output:
+ [-113, -104, -88, -61, -18,
+ -18, -61, -88, -104, -113]}
+
+# Instantiate an example
+Example((input0, output0))
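+
+# Sketch reproducing the first expected row above (plain Python 3, math only):
+# dequantize with scale 0.5 / zeroPoint -128, apply softmax with beta 1, then
+# requantize with scale 1/256 / zeroPoint -128.
+import math
+
+def _softmax_quant(qs, beta=1.0):
+    reals = [0.5 * (q + 128) for q in qs]
+    exps = [math.exp(beta * r) for r in reals]
+    total = sum(exps)
+    return [min(127, int(round(e / total * 256)) - 128) for e in exps]
+
+assert _softmax_quant([-127, -126, -125, -124, -123]) == [-113, -104, -88, -61, -18]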
+
+#######################################################
+
+i = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 5}") # input 0
+o = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 5}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ o: ("TENSOR_QUANT8_ASYMM_SIGNED", 1./256, -128)
+})
+
+example1 = {
+ i: [17., 16., 15., 14., 1.,
+ -1., -2., -3., -4., -17.] * 4,
+ o: [0.643914213228014,
+ 0.236882800924671,
+ 0.087144312427294,
+ 0.032058600957022,
+ 7.246299848982885e-08] * 8
+}
+example2 = {
+ i: [1., 2., 3., 4., 5., -1., -2., -3., -4., -5.] * 4,
+ o: [0.2] * 40
+}
+
+# All dimensions other than 2 or 4, without axis parameter
+# beta = 1.0
+Model().Operation("SOFTMAX", i, 1.0).To(o)
+Example(example1).AddVariations(quant8_signed, includeDefault=False).AddDims([1, 3], i, o)
+# beta = 0.000001
+Model().Operation("SOFTMAX", i, 0.000001).To(o)
+Example(example2).AddVariations(quant8_signed, includeDefault=False).AddDims([1, 3], i, o)
+
+#######################################################
+# All dimensions, with all possible axis parameter
+# beta = 1.0
+Model("axis").Operation("SOFTMAX", i, 1.0, axis).To(o)
+Example(example1).AddVariations(quant8_signed, includeDefault=False).AddAllDimsAndAxis(i, o, axis)
+# beta = 0.000001
+Model("axis").Operation("SOFTMAX", i, 0.000001, axis).To(o)
+Example(example2).AddVariations(quant8_signed, includeDefault=False).AddAllDimsAndAxis(i, o, axis)
+
+#######################################################
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# SOFTMAX op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("SOFTMAX", zero_sized, 1.0).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1./256, -128)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/space_to_batch_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/space_to_batch_quant8_signed.mod.py
new file mode 100644
index 000000000..1cede4568
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/space_to_batch_quant8_signed.mod.py
@@ -0,0 +1,212 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 4, 1}, 1.0, -128")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 2, 2, 1}, 1.0, -128")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [
+ -127, -126, -125, -124, -123, -122, -121, -120, -119, -118, -117,
+ -116, -115, -114, -113, -112
+ ]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -127, -125, -119, -117, -126, -124, -118, -116, -123, -121, -115,
+ -113, -122, -120, -114, -112
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
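+
+# Plain-Python reference of the rearrangement above (one channel, block 2x2,
+# no padding): each (by, bx) offset within the block becomes one output batch.
+def _space_to_batch_2x2(rows):
+    h, w = len(rows), len(rows[0])
+    return [[rows[y][x] for y in range(by, h, 2) for x in range(bx, w, 2)]
+            for by in range(2) for bx in range(2)]
+
+assert _space_to_batch_2x2(
+    [[-127, -126, -125, -124], [-123, -122, -121, -120],
+     [-119, -118, -117, -116], [-115, -114, -113, -112]]) == [
+        [-127, -125, -119, -117], [-126, -124, -118, -116],
+        [-123, -121, -115, -113], [-122, -120, -114, -112]]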
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5, 2, 1}, 1.0, -128")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{6, 2, 2, 1}, 1.0, -128")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [-127, -126, -125, -124, -123, -122, -121, -120, -119, -118]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -128, -128, -128, -123, -128, -128, -128, -122, -128, -127, -128,
+ -121, -128, -126, -128, -120, -128, -125, -128, -119, -128, -124,
+ -128, -118
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 2, 1}, 1.0, -128")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{6, 2, 4, 1}, 1.0, -128")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [-127, -126, -125, -124, -123, -122, -121, -120]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -128, -128, -128, -128, -128, -123, -128, -128, -128, -128, -128,
+ -128, -128, -122, -128, -128, -128, -127, -128, -128, -128, -121,
+ -128, -128, -128, -126, -128, -128, -128, -120, -128, -128, -128,
+ -125, -128, -128, -128, -128, -128, -128, -128, -124, -128, -128,
+ -128, -128, -128, -128
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+# Quantized SPACE_TO_BATCH_ND with non-zero zeroPoint is supported since 1.2.
+# See http://b/132112227.
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5, 2, 1}, 1.0, -119")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{6, 2, 2, 1}, 1.0, -119")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [-127, -126, -125, -124, -123, -122, -121, -120, -119, -118]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -119, -119, -119, -123, -119, -119, -119, -122, -119, -127, -119,
+ -121, -119, -126, -119, -120, -119, -125, -119, -119, -119, -124,
+ -119, -118
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# SPACE_TO_BATCH_NCHW_1, block_size = [2, 2]
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+pad1 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+Model().Operation("SPACE_TO_BATCH_ND", i1, [2, 2], pad1, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# SPACE_TO_BATCH_NCHW_2, block_size = [2, 2]
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+Model().Operation("SPACE_TO_BATCH_ND", i2, [2, 2], pad1, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ o2: [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# SPACE_TO_BATCH_NCHW_3, block_size = [3, 2]
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 5, 2, 1}")
+pad3 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+o3 = Output("op4", "TENSOR_FLOAT32", "{6, 2, 2, 1}")
+Model().Operation("SPACE_TO_BATCH_ND", i3, [3, 2], pad3, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ o3: [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+ 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# SPACE_TO_BATCH_NCHW_4, block_size = [3, 2]
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 2, 1}")
+pad4 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+o4 = Output("op4", "TENSOR_FLOAT32", "{6, 2, 4, 1}")
+Model().Operation("SPACE_TO_BATCH_ND", i4, [3, 2], pad4, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [1, 2, 3, 4, 5, 6, 7, 8],
+ o4: [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+ 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+ 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]
+}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/space_to_depth_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/space_to_depth_quant8_signed.mod.py
new file mode 100644
index 000000000..7fc7a51e7
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/space_to_depth_quant8_signed.mod.py
@@ -0,0 +1,124 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 2}, 0.5f, -128")
+block = Int32Scalar("radius", 2)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 1, 8}, 0.5f, -128")
+
+model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124, 124, 125, 126, 127]}
+
+output0 = {output: # output 0
+ [-127, -126, -125, -124, 124, 125, 126, 127]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 4, 4, 1}, 0.5f, -128")
+block = Int32Scalar("radius", 2)
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 4}, 0.5f, -128")
+
+model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [
+ -128, -127, -126, -125, -124, -123, -122, -121, 120, 121, 122, 123,
+ 124, 125, 126, 127
+ ]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -128, -127, -124, -123, -126, -125, -122, -121, 120, 121, 124, 125,
+ 122, 123, 126, 127
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
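+
+# Plain-Python reference for the example above (one channel, block size 2):
+# each 2x2 spatial block becomes a single output pixel with four channels.
+def _space_to_depth_2(rows):
+    return [[rows[y + dy][x + dx] for dy in range(2) for dx in range(2)]
+            for y in range(0, len(rows), 2) for x in range(0, len(rows[0]), 2)]
+
+assert _space_to_depth_2(
+    [[-128, -127, -126, -125], [-124, -123, -122, -121],
+     [120, 121, 122, 123], [124, 125, 126, 127]]) == [
+        [-128, -127, -124, -123], [-126, -125, -122, -121],
+        [120, 121, 124, 125], [122, 123, 126, 127]]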
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# SPACE_TO_DEPTH_NCHW_1, block_size = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 8}")
+Model().Operation("SPACE_TO_DEPTH", i1, 2, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# SPACE_TO_DEPTH_NCHW_2, block_size = 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("SPACE_TO_DEPTH", i2, 2, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.],
+ o2: [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]
+}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# SPACE_TO_DEPTH_NCHW_3, block_size = 2
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 8}")
+Model().Operation("SPACE_TO_DEPTH", i3, 2, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -128),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [10, 20, 11, 21, 12, 22, 13, 23,
+ 14, 24, 15, 25, 16, 26, 17, 27,
+ 18, 28, 19, 29, 110, 210, 111, 211,
+ 112, 212, 113, 213, 114, 214, 115, 215],
+ o3: [10, 20, 11, 21, 14, 24, 15, 25,
+ 12, 22, 13, 23, 16, 26, 17, 27,
+ 18, 28, 19, 29, 112, 212, 113, 213,
+ 110, 210, 111, 211, 114, 214, 115, 215]
+}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/split_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/split_quant8_signed.mod.py
new file mode 100644
index 000000000..d4f6c6d66
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/split_quant8_signed.mod.py
@@ -0,0 +1,103 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{6}, 1.0, -128")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+output2 = Output("output2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To(
+ (output0, output1, output2))
+
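+# Axis 0 of the {6} tensor is partitioned into three equal {2} slices in order.
+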
+# Example 1.
+input_dict = {input0: [-127, -126, -125, -124, -123, -122]}
+output_dict = {
+ output0: [-127, -126],
+ output1: [-125, -124],
+ output2: [-123, -122],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 2.0, -125")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3}, 2.0, -125")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3}, 2.0, -125")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To(
+ (output0, output1))
+
+# Example 1.
+input_dict = {input0: [-127, -126, -125, -124, -123, -122]}
+output_dict = {
+ output0: [-127, -126, -125],
+ output1: [-124, -123, -122],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 2.0, -125")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1}, 2.0, -125")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1}, 2.0, -125")
+output2 = Output("output2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1}, 2.0, -125")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To(
+ (output0, output1, output2))
+
+# Example 1.
+input_dict = {input0: [-127, -126, -125, -124, -123, -122]}
+output_dict = {
+ output0: [-127, -124],
+ output1: [-126, -123],
+ output2: [-125, -122],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict))
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2, 2}, 1.0, -128")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{2, 1, 2}, 1.0, -128")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{2, 1, 2}, 1.0, -128")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To(
+ (output0, output1))
+
+# Example 1.
+input_dict = {input0: [-127, -126, -125, -124, -123, -122, -121, -120]}
+output_dict = {
+ output0: [-127, -126, -123, -122],
+ output1: [-125, -124, -121, -120],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict))
diff --git a/nn/runtime/test/specs/V1_3/squeeze_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/squeeze_quant8_signed.mod.py
new file mode 100644
index 000000000..e4fcb72e8
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/squeeze_quant8_signed.mod.py
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 24, 1}, 1.0, -128")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [2])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 24}, 1.0, -128")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
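+# squeezeDims = [2] removes the trailing size-1 dimension: {1, 24, 1} -> {1, 24}.
+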
+# Example 1. Input in operand 0,
+input0 = {
+ i1: # input 0
+ [
+ -127, -126, -125, -124, -123, -122, -121, -120, -119, -118, -117,
+ -116, -115, -114, -113, -112, -111, -110, -109, -108, -107, -106,
+ -105, -104
+ ]
+}
+
+output0 = {
+ output: # output 0
+ [
+ -127, -126, -125, -124, -123, -122, -121, -120, -119, -118, -117,
+ -116, -115, -114, -113, -112, -111, -110, -109, -108, -107, -106,
+ -105, -104
+ ]
+}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_3/strided_slice_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/strided_slice_quant8_signed.mod.py
new file mode 100644
index 000000000..d49d72c64
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/strided_slice_quant8_signed.mod.py
@@ -0,0 +1,288 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 2)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
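+# endMask = 2 sets bit 1, so ends[1] is ignored and dimension 1 runs to its
+# full extent: rows [1, 2) x cols [0, 3) selects the second row of the input.
+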
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124, -123, -122]}
+
+output0 = {output: # output 0
+ [-124, -123, -122]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [1, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
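+# shrinkAxisMask = 1 sets bit 0, so dimension 0 is sliced at begins[0] = 0 and
+# then dropped, reducing the {1, 3} slice to shape {3}.
+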
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124, -123, -122]}
+
+output0 = {output: # output 0
+ [-127, -126, -125]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {output: # output 0
+ [-126, -125]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-3])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
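+# begins[0] = -3 wraps to index 1 (4 + (-3)), so the slice covers [1, 3).
+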
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {output: # output 0
+ [-126, -125]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-5])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
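+# begins[0] = -5 is out of range and clamps to 0, so the slice covers [0, 3).
+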
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {output: # output 0
+ [-127, -126, -125]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [-2])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {output: # output 0
+ [-126]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 1)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {output: # output 0
+ [-127, -126, -125]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{4}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 1)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124]}
+
+output0 = {output: # output 0
+ [-126, -125, -124]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [-4])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [-1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
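+# With stride -1, begins[0] = -1 wraps to index 2 and ends[0] = -4 stops just
+# past index 0, so the whole vector is traversed in reverse.
+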
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125]}
+
+output0 = {output: # output 0
+ [-125, -126, -127]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, -1])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, -4])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, -1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124, -123, -122]}
+
+output0 = {output: # output 0
+ [-122, -123, -124]}
+
+# Instantiate an example
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 1.0, -128")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 1)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, -128")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-127, -126, -125, -124, -123, -122]}
+
+output0 = {output: # output 0
+ [-127, -126, -124, -123]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/nn/runtime/test/specs/V1_3/sub_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/sub_quant8_signed.mod.py
new file mode 100644
index 000000000..9e88d3e9d
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/sub_quant8_signed.mod.py
@@ -0,0 +1,145 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import itertools
+import random
+
+def dequantize(x, scale, offset):
+ return (x - offset) * scale
+
+def quantize(x, scale, offset):
+ return max(-128, min(127, int(round(x / scale)) + offset))
+
+def create_test(input0_scale, input0_offset,
+ input1_scale, input1_offset,
+ output_scale, output_offset):
+ def sub_quantized(a, b):
+ a_dequantized = dequantize(a, input0_scale, input0_offset)
+ b_dequantized = dequantize(b, input1_scale, input1_offset)
+ return quantize(a_dequantized - b_dequantized, output_scale, output_offset)
+
+ values = [-128, -127, -126, -125, -124, -123, 122, 123, 124, 125, 126, 127]
+ inputs = list(itertools.product(values, values))
+ input0_values, input1_values = zip(*inputs)
+ output_values = [sub_quantized(a, b) for a, b in inputs]
+ size = len(output_values)
+ input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d}, %g, %d" % (size, input0_scale, input0_offset))
+ input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d}, %g, %d" % (size, input1_scale, input1_offset))
+ activation = 0
+ output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED",
+ "{%d}, %g, %d" % (size, output_scale, output_offset))
+ model = Model().Operation("SUB", input0, input1, activation).To(output0)
+ Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+ })
+
+scales_and_offsets = [(1.0, -128),
+ (1.0, -127),
+ (0.01, -8),
+ (10.0, -8)]
+for params in itertools.product(scales_and_offsets,
+ scales_and_offsets,
+ scales_and_offsets):
+ input0_params, input1_params, output_params = params
+ create_test(*input0_params, *input1_params, *output_params)
+
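+# Sanity check of the helpers above (illustrative): with scale 1.0 and offset
+# -128, -128 dequantizes to 0.0 and 122 to 250.0, so their difference -250.0
+# saturates to -128 on requantization.
+assert quantize(dequantize(-128, 1.0, -128) - dequantize(122, 1.0, -128),
+                1.0, -128) == -128
+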
+#######################################################
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2}, 1.0, -128")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, -128")
+activation = 0
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, -128")
+
+model = Model("quant8").Operation("SUB", input0, input1, activation).To(output0)
+
+input0_values = [-28, 72]
+input1_values = [-127, -126,
+ -125, -124]
+output_values = [-29, 70,
+ -31, 68]
+
+Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+})
+
+#######################################################
+
+shape = "{2, 4, 16, 2}, 0.5, -128"
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", shape)
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", shape)
+activation = 0
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", shape)
+
+model = Model("quant8").Operation("SUB", input0, input1, activation).To(output0)
+
+input0_values = list(range(-128, 128))
+input1_values = list(input0_values)
+random.seed(0)
+random.shuffle(input1_values)
+output_values = [max(-128, (a - b) - 128) for a, b in zip(input0_values, input1_values)]
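+# With equal scales (0.5) and offsets (-128) on both inputs and the output,
+# quantized subtraction reduces to clamp((a - b) - 128, -128, 127); the upper
+# clamp is a no-op here because a - b never exceeds 255.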
+
+Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+})
+
+#######################################################
+# SUB, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to produce a zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# SUB op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("SUB", zero_sized, i2, 0).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1, 2],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/tanh_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/tanh_quant8_signed.mod.py
new file mode 100644
index 000000000..13c54cfe9
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/tanh_quant8_signed.mod.py
@@ -0,0 +1,75 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import math
+
+input_scale, input_offset = 0.05, -28
+output_scale, output_offset = 1.0 / 128, 0  # Fixed by the TANH spec: quantized signed output must use scale 1/128, zeroPoint 0.
+
+def dequantize(x):
+ return (x - input_offset) * input_scale
+
+def quantize(x):
+ return max(-128, min(127, int(round(x / output_scale)) + output_offset))
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{256}, %g, %d" % (input_scale, input_offset))
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{256}, %g, %d" % (output_scale, output_offset))
+model = Model().Operation("TANH", input0).To(output0)
+
+input_values = list(range(-128, 128))
+output_values = [quantize(math.tanh(dequantize(x))) for x in input_values]
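+# Sanity check (illustrative): x = -28 dequantizes to 0.0 and tanh(0.0) = 0.0
+# quantizes back to 0; x = 127 dequantizes to 7.75, where tanh saturates to 127
+# at scale 1/128.
+assert output_values[-28 + 128] == 0 and output_values[-1] == 127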
+
+Example({
+ input0: input_values,
+ output0: output_values,
+})
+
+#######################################################
+# zero-sized input
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to produce a zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TANH op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("TANH", zero_sized).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0 / 128, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/tile_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/tile_quant8_signed.mod.py
new file mode 100644
index 000000000..6349ee5e0
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/tile_quant8_signed.mod.py
@@ -0,0 +1,90 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{3}")
+multipliers = Input("multipliers", "TENSOR_INT32", "{1}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{6}")
+
+model = Model().Operation("TILE", input0, multipliers).To(output0)
+
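+# multipliers = [2] repeats the {3} vector twice along its only axis -> {6}.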
+input_values = [1.2, -3.4, 5.6]
+multiplier_values = [2]
+output_values = input_values + input_values
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+})
+
+Example({
+ input0: input_values,
+ multipliers: multiplier_values,
+ output0: output_values,
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 3}")
+multipliers = Input("multipliers", "TENSOR_INT32", "{2}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{4, 3}")
+
+model = Model().Operation("TILE", input0, multipliers).To(output0)
+
+input_values = [11, 12, 13,
+ 21, 22, 23]
+multiplier_values = [2, 1]
+output_values = [11, 12, 13,
+ 21, 22, 23,
+ 11, 12, 13,
+ 21, 22, 23]
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+})
+
+Example({
+ input0: input_values,
+ multipliers: multiplier_values,
+ output0: output_values,
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 2, 3}")
+multipliers = Input("multipliers", "TENSOR_INT32", "{3}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 6, 3}")
+
+model = Model().Operation("TILE", input0, multipliers).To(output0)
+
+input_values = [11, 12, 13,
+ 21, 22, 23]
+multiplier_values = [2, 3, 1]
+output_values = [11, 12, 13, 21, 22, 23, 11, 12, 13,
+ 21, 22, 23, 11, 12, 13, 21, 22, 23,
+ 11, 12, 13, 21, 22, 23, 11, 12, 13,
+ 21, 22, 23, 11, 12, 13, 21, 22, 23]
+
+quant8_signed = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+ output0: ["TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -1],
+})
+
+Example({
+ input0: input_values,
+ multipliers: multiplier_values,
+ output0: output_values,
+}).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/topk_v2_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/topk_v2_quant8_signed.mod.py
new file mode 100644
index 000000000..3cdc9d7b7
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/topk_v2_quant8_signed.mod.py
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+inp = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 2.0, 0")
+inp_data = [-127, -126, -125, 123, 122, 121]
+k = Int32Scalar("k", 2)
+out_values = Output("out_values", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 2.0, 0")
+out_values_data = [-125, -126, 123, 122]
+out_indices = Output("out_indices", "TENSOR_INT32", "{2, 2}")
+out_indices_data = [2, 1, 0, 1]
+
+model = Model().Operation("TOPK_V2", inp, k).To(out_values, out_indices)
+Example(
+ {
+ inp: inp_data,
+ out_values: out_values_data,
+ out_indices: out_indices_data
+ },
+ model=model)
diff --git a/nn/runtime/test/specs/V1_3/transpose_conv2d_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/transpose_conv2d_quant8_signed.mod.py
new file mode 100644
index 000000000..293adbf4d
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/transpose_conv2d_quant8_signed.mod.py
@@ -0,0 +1,317 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TRANSPOSE_CONV2D_LARGE, pad = same, stride = 32
+i1 = Input("op1", "TENSOR_FLOAT32", "{25, 1, 1, 1}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{16, 1, 1, 1}", [1] * 16) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{16}", [0] * 16) # bias
+s1 = Int32Vector("shape", [25, 32, 32, 16]) # output shape
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{25, 32, 32, 16}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 1, 32, 32, act, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5] * 16)),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125] * 16, hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
+})
+
+Example({
+ i1: [1] * 25,
+ o1: ([1] * 16 + [0] * (32 * 32 - 1) * 16) * 25
+}).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TRANSPOSE_CONV2D, pad = valid, stride = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18]) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
+s1 = Int32Vector("shape", [1, 5, 5, 2]) # output shape
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+quant8_signed_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
+})
+
+channelQuant8_signed_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
+})
+
+Example({
+ i1: [1, 2, 3, 4],
+ o1: [-0.5, 0, 1.5, 2, 5.5, 8, 4.5, 6, 8.5, 10,
+ 5.5, 6, 7.5, 8, 23.5, 26, 16.5, 18, 20.5, 22,
+ 14.5, 18, 22.5, 26, 60.5, 70, 40.5, 46, 52.5, 58,
+ 19.5, 22, 25.5, 28, 59.5, 66, 34.5, 38, 42.5, 46,
+ 37.5, 40, 43.5, 46, 101.5, 108, 58.5, 62, 66.5, 70]
+}).AddNchw(i1, o1, s1, layout).AddAllActivations(o1, act).AddVariations(quant8_signed, quant8_signed_mult_gt_1, channelQuant8_signed, channelQuant8_signed_mult_gt_1, includeDefault=False)
+
+#######################################################
+
+# TRANSPOSE_CONV2D_LARGE, pad = same, stride = 3, act = relu
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 2, 1}") # input 0
+w2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [9, 5, 6, 9, 8, 5, 3, 1, 4]) # weight
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-1000]) # bias
+s2 = Int32Vector("shape", [1, 3, 4, 1]) # output shape
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i2, w2, b2, s2, 1, 3, 3, 1, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -128),
+ w2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ b2: ("TENSOR_INT32", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 20.0, -78)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -128),
+ w2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 20.0, -78)
+})
+
+Example({
+ i2: [300, 500],
+ o2: [500., 800., 3500., 1500.,
+ 1400., 500., 3500., 3000.,
+ 0., 200., 500., 0.]
+}).AddNchw(i2, o2, s2, layout).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 1, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s3 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i3, w3, b3, s3, 1, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b3: ("TENSOR_INT32", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 16.0, -128)
+})
+
+Example({
+ i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o3: [184, 412, 568, 528,
+ 678, 1347, 1689, 1434,
+ 1494, 2715, 3057, 2442,
+ 1968, 3352, 3652, 2760]
+}).AddNchw(i3, o3, s3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_VALID, outputShape = [1, 6, 6, 1], pad = valid, stride = 1, act = none
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w4 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s4 = Int32Vector("shape", [1, 6, 6, 1]) # output shape
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 6, 6, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -118),
+ w4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b4: ("TENSOR_INT32", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 32.0, -48)
+})
+
+Example({
+ i4: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o4: [5, 22, 59, 101, 114, 83,
+ 52, 184, 412, 568, 528, 344,
+ 237, 678, 1347, 1689, 1434, 879,
+ 597, 1494, 2715, 3057, 2442, 1431,
+ 856, 1968, 3352, 3652, 2760, 1548,
+ 689, 1534, 2543, 2729, 2010, 1103]
+}).AddNchw(i4, o4, s4, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_EXPLICIT, pad = [1, 2, 2, 1], stride = 1, act = none
+i5 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w5 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b5 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+o5 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i5, w5, b5, 1, 2, 2, 1, 1, 1, 0, layout).To(o5)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ b5: ("TENSOR_INT32", 0.125, 0),
+ o5: ("TENSOR_QUANT8_ASYMM_SIGNED", 20.0, -78)
+})
+
+Example({
+ i5: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o5: [678, 1347, 1689,
+ 1494, 2715, 3057,
+ 1968, 3352, 3652]
+}).AddNchw(i5, o5, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to produce a zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE_CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5, 2, 4, 6, 8, 10, 12, 10, 8, 6]) # weight
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
+s = Int32Vector("shape", [0, 5, 5, 2]) # output shape
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 5, 5, 2}") # out
+model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, s, 2, 2, 2, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, s, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to produce a zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 4, 4, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 4, 4, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE_CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5]) # weight
+b = Parameter("bias", "TENSOR_FLOAT32", "{1}", [-1.5]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, 1, 2, 2, 1, 1, 1, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 2, act = none
+i8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+w8 = Parameter("op2", "TENSOR_FLOAT32", "{1, 1, 1, 1}", [2]) # weight
+b8 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s8 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
+o8 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i8, w8, b8, s8, 1, 2, 2, 0, layout).To(o8)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i8: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w8: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b8: ("TENSOR_INT32", 0.25, 0),
+ o8: ("TENSOR_QUANT8_ASYMM_SIGNED", 16.0, -128)
+})
+
+Example({
+ i8: [1, 2, 3, 4],
+ o8: [2, 0, 4, 0, 0, 0, 0, 0, 6, 0, 8, 0, 0, 0, 0, 0]
+}).AddNchw(i8, o8, s8, layout).AddVariations(quant8_signed, includeDefault=False)
diff --git a/nn/runtime/test/specs/V1_3/transpose_quant8_signed.mod.py b/nn/runtime/test/specs/V1_3/transpose_quant8_signed.mod.py
new file mode 100644
index 000000000..2e4850b38
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/transpose_quant8_signed.mod.py
@@ -0,0 +1,415 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TRANSPOSE_CONV2D_LARGE, pad = same, stride = 32
+i1 = Input("op1", "TENSOR_FLOAT32", "{25, 1, 1, 1}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{16, 1, 1, 1}", [1] * 16) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{16}", [0] * 16) # bias
+s1 = Int32Vector("shape", [25, 32, 32, 16]) # output shape
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{25, 32, 32, 16}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 1, 32, 32, act, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5] * 16)),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125] * 16, hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
+})
+
+Example({
+ i1: [1] * 25,
+ o1: ([1] * 16 + [0] * (32 * 32 - 1) * 16) * 25
+}).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
+
+#######################################################
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TRANSPOSE_CONV2D, pad = valid, stride = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18]) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
+s1 = Int32Vector("shape", [1, 5, 5, 2]) # output shape
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+quant8_signed_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
+})
+
+channelQuant8_signed_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -28),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
+})
+
+Example({
+ i1: [1, 2, 3, 4],
+ o1: [-0.5, 0, 1.5, 2, 5.5, 8, 4.5, 6, 8.5, 10,
+ 5.5, 6, 7.5, 8, 23.5, 26, 16.5, 18, 20.5, 22,
+ 14.5, 18, 22.5, 26, 60.5, 70, 40.5, 46, 52.5, 58,
+ 19.5, 22, 25.5, 28, 59.5, 66, 34.5, 38, 42.5, 46,
+ 37.5, 40, 43.5, 46, 101.5, 108, 58.5, 62, 66.5, 70]
+}).AddNchw(i1, o1, s1, layout).AddAllActivations(o1, act).AddVariations(quant8_signed, quant8_signed_mult_gt_1, channelQuant8_signed, channelQuant8_signed_mult_gt_1, includeDefault=False)
+
+#######################################################
+
+# TRANSPOSE_CONV2D_LARGE, pad = same, stride = 3, act = relu
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 2, 1}") # input 0
+w2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [9, 5, 6, 9, 8, 5, 3, 1, 4]) # weight
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-1000]) # bias
+s2 = Int32Vector("shape", [1, 3, 4, 1]) # output shape
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i2, w2, b2, s2, 1, 3, 3, 1, layout).To(o2)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -128),
+ w2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ b2: ("TENSOR_INT32", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 20.0, -78)
+})
+
+# Per-channel quantization
+channelQuant8_signed = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 2.0, -128),
+ w2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 20.0, -78)
+})
+
+Example({
+ i2: [300, 500],
+ o2: [500., 800., 3500., 1500.,
+ 1400., 500., 3500., 3000.,
+ 0., 200., 500., 0.]
+}).AddNchw(i2, o2, s2, layout).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
+
+#######################################################
+
+# TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 1, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s3 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i3, w3, b3, s3, 1, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b3: ("TENSOR_INT32", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 16.0, -128)
+})
+
+Example({
+ i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o3: [184, 412, 568, 528,
+ 678, 1347, 1689, 1434,
+ 1494, 2715, 3057, 2442,
+ 1968, 3352, 3652, 2760]
+}).AddNchw(i3, o3, s3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_VALID, outputShape = [1, 6, 6, 1], pad = valid, stride = 1, act = none
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w4 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s4 = Int32Vector("shape", [1, 6, 6, 1]) # output shape
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 6, 6, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -118),
+ w4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b4: ("TENSOR_INT32", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 32.0, -48)
+})
+
+Example({
+ i4: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o4: [5, 22, 59, 101, 114, 83,
+ 52, 184, 412, 568, 528, 344,
+ 237, 678, 1347, 1689, 1434, 879,
+ 597, 1494, 2715, 3057, 2442, 1431,
+ 856, 1968, 3352, 3652, 2760, 1548,
+ 689, 1534, 2543, 2729, 2010, 1103]
+}).AddNchw(i4, o4, s4, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_EXPLICIT, pad = [1, 2, 2, 1], stride = 1, act = none
+i5 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w5 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b5 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+o5 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i5, w5, b5, 1, 2, 2, 1, 1, 1, 0, layout).To(o5)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
+ b5: ("TENSOR_INT32", 0.125, 0),
+ o5: ("TENSOR_QUANT8_ASYMM_SIGNED", 20.0, -78)
+})
+
+Example({
+ i5: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o5: [678, 1347, 1689,
+ 1494, 2715, 3057,
+ 1968, 3352, 3652]
+}).AddNchw(i5, o5, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to produce a zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE_CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5, 2, 4, 6, 8, 10, 12, 10, 8, 6]) # weight
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
+s = Int32Vector("shape", [0, 5, 5, 2]) # output shape
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 5, 5, 2}") # out
+model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, s, 2, 2, 2, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, s, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert the zero-sized ROIs into a zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 4, 4, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 4, 4, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE_CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5]) # weight
+b = Parameter("bias", "TENSOR_FLOAT32", "{1}", [-1.5]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, 1, 2, 2, 1, 1, 1, 0, layout).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ w: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 2, act = none
+i8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+w8 = Parameter("op2", "TENSOR_FLOAT32", "{1, 1, 1, 1}", [2]) # weight
+b8 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s8 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
+o8 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i8, w8, b8, s8, 1, 2, 2, 0, layout).To(o8)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i8: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
+ w8: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
+ b8: ("TENSOR_INT32", 0.25, 0),
+ o8: ("TENSOR_QUANT8_ASYMM_SIGNED", 16.0, -128)
+})
+
+Example({
+ i8: [1, 2, 3, 4],
+ o8: [2, 0, 4, 0, 0, 0, 0, 0, 6, 0, 8, 0, 0, 0, 0, 0]
+}).AddNchw(i8, o8, s8, layout).AddVariations(quant8_signed, includeDefault=False)
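+# With a 1x1 kernel of value 2 and stride 2, each input element is scaled by 2
+# and scattered to the even output coordinates; all other positions keep the
+# (zero) bias value, which matches the expected output above.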
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3, 4, 5}, 1.0, -128")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [2, 0, 1, 3])
+output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 2, 3, 5}, 1.0, -128")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0.
+input0 = {i1: # input 0
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}
+
+output0 = {output: # output 0
+ [0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
+ 60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
+ 5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
+ 65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
+ 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
+ 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+ 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
+ 75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119]}
+
+Example((input0, output0))
+
+#######################################################
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 2}")
+perms = Input("perms", "TENSOR_INT32", "{0}")
+output = Output("output", "TENSOR_FLOAT32", "{2, 2}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Additional data type
+quant8_signed = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
+ output: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
+})
+
+# Instantiate an example
+Example({
+ i1: [1.0, 2.0,
+ 3.0, 4.0],
+ perms: [],
+ output: [1.0, 3.0,
+ 2.0, 4.0]
+}).AddVariations(quant8_signed, includeDefault=False)
+
+#######################################################
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert the zero-sized ROIs into a zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 2, 2}") # out
+model = model.Operation("TRANSPOSE", zero_sized, [0, 3, 1, 2]).To(o3)
+
+quant8_signed = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
+ o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
+})
+
+Example({
+ i1: [1],
+ o1: [],
+ o2: [],
+ o3: [],
+}).AddVariations(quant8_signed, includeDefault=False)