path: root/nn/runtime/test/specs/V1_3
author     Slava Shklyaev <slavash@google.com>  2020-03-11 14:59:36 +0000
committer  Slava Shklyaev <slavash@google.com>  2020-03-16 10:54:32 +0000
commit     3844591c061d404697d711f84351e4b2ce9cf01a (patch)
tree       9081f99a3e38387d4ab91eb545622d7683792333 /nn/runtime/test/specs/V1_3
parent     87741faed0bfbf2ff5c754380f3b29c885749799 (diff)
download   ml-3844591c061d404697d711f84351e4b2ce9cf01a.tar.gz
Validate type of dimensions in test generator
This change
- adds "dimensions" type validation in the test generator,
- fixes incorrect usages,
- adds new control flow and QLSTM tests that were previously omitted due to
  this bug,
- refactors QLSTM tests for readability and ease of maintenance, and
- updates README to use the new type format.

Fix: 151217992
Test: generate_all_tests.sh
Test: NNT_static
Change-Id: I55b47a7ff02cb5ec25150d6be322ee3dbe9dcf43
Merged-In: I55b47a7ff02cb5ec25150d6be322ee3dbe9dcf43
(cherry picked from commit 85f3703a1b159b88bbb9ac39993eba4658dc4fda)
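For context, a minimal sketch of the format change and the kind of check the
commit describes, using names from if_constant.mod.py below. The
validate_dimensions function is a hypothetical illustration only; the actual
validation lives in the test generator sources, which are outside this
directory.

    # Old format encoded dimensions as a brace-delimited string; the new
    # format uses a Python list of ints.
    OldValueType = ["TENSOR_FLOAT32", "{3, 4}"]  # rejected after this change
    NewValueType = ["TENSOR_FLOAT32", [3, 4]]    # accepted

    # Hypothetical sketch of the kind of type validation this change adds
    # to the test generator (not the generator's actual function).
    def validate_dimensions(dimensions):
        if not isinstance(dimensions, (list, tuple)):
            raise TypeError(
                "dimensions must be a list or tuple, got %r" % (dimensions,))
        if not all(isinstance(d, int) for d in dimensions):
            raise TypeError(
                "each dimension must be an int: %r" % (dimensions,))

    validate_dimensions(NewValueType[1])    # passes
    # validate_dimensions(OldValueType[1])  # would raise TypeError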
Diffstat (limited to 'nn/runtime/test/specs/V1_3')
-rw-r--r--  nn/runtime/test/specs/V1_3/if_constant.mod.py         |   4
-rw-r--r--  nn/runtime/test/specs/V1_3/if_simple.mod.py           |   4
-rw-r--r--  nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py  | 106
-rw-r--r--  nn/runtime/test/specs/V1_3/qlstm_projection.mod.py    | 106
-rw-r--r--  nn/runtime/test/specs/V1_3/while_fib.mod.py           |  10
-rw-r--r--  nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py |   4
-rw-r--r--  nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py |   6
7 files changed, 96 insertions, 144 deletions
diff --git a/nn/runtime/test/specs/V1_3/if_constant.mod.py b/nn/runtime/test/specs/V1_3/if_constant.mod.py
index badf4476b..ae6750146 100644
--- a/nn/runtime/test/specs/V1_3/if_constant.mod.py
+++ b/nn/runtime/test/specs/V1_3/if_constant.mod.py
@@ -24,8 +24,8 @@ output_data = {
False: [x - y for (x, y) in zip(x_data, y_data)],
}
-ValueType = ["TENSOR_FLOAT32", "{3, 4}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+ValueType = ["TENSOR_FLOAT32", [3, 4]]
+BoolType = ["TENSOR_BOOL8", [1]]
def MakeBranchModel(operation_name):
x = Input("x", ValueType)
diff --git a/nn/runtime/test/specs/V1_3/if_simple.mod.py b/nn/runtime/test/specs/V1_3/if_simple.mod.py
index 384bc2cf7..c13446588 100644
--- a/nn/runtime/test/specs/V1_3/if_simple.mod.py
+++ b/nn/runtime/test/specs/V1_3/if_simple.mod.py
@@ -20,8 +20,8 @@ input_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
output_add = [y + 100 for y in input_data]
output_sub = [y - 100 for y in input_data]
-ValueType = ["TENSOR_FLOAT32", "{3, 4}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+ValueType = ["TENSOR_FLOAT32", [3, 4]]
+BoolType = ["TENSOR_BOOL8", [1]]
def MakeBranchModel(operation_name):
y = Input("y", ValueType)
diff --git a/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py b/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
index 41704f83b..18300a900 100644
--- a/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
+++ b/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
@@ -24,65 +24,46 @@ input_size = 5
num_units = 4
output_size = 4
-input = Input("input",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, input_size), 0.0078125, 0))
-
-input_to_input_weights = Input("input_to_input_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_forget_weights = Input("input_to_forget_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_cell_weights = Input("input_to_cell_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_output_weights = Input("input_to_output_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-
-recurrent_to_input_weights = Input("recurrent_to_input_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-recurrent_to_output_weights = Input("recurrent_to_output_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-
-cell_to_input_weights = Input("cell_to_input_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_forget_weights = Input("cell_to_forget_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_output_weights = Input("cell_to_output_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-
-input_gate_bias = Input("input_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-forget_gate_bias = Input("forget_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-cell_gate_bias = Input("cell_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-output_gate_bias = Input("output_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
+InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
+input = Input("input", InputType)
+
+InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
+input_to_input_weights = Input("input_to_input_weights", InputWeightsType)
+input_to_forget_weights = Input("input_to_forget_weights", InputWeightsType)
+input_to_cell_weights = Input("input_to_cell_weights", InputWeightsType)
+input_to_output_weights = Input("input_to_output_weights", InputWeightsType)
+
+RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
+recurrent_to_input_weights = Input("recurrent_to_input_weights", RecurrentWeightsType)
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", RecurrentWeightsType)
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", RecurrentWeightsType)
+recurrent_to_output_weights = Input("recurrent_to_output_weights", RecurrentWeightsType)
+
+CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
+cell_to_input_weights = Input("cell_to_input_weights", CellWeightsType)
+cell_to_forget_weights = Input("cell_to_forget_weights", CellWeightsType)
+cell_to_output_weights = Input("cell_to_output_weights", CellWeightsType)
+
+BiasType = ("TENSOR_INT32", [num_units], 4.65661e-08, 0)
+input_gate_bias = Input("input_gate_bias", BiasType)
+forget_gate_bias = Input("forget_gate_bias", BiasType)
+cell_gate_bias = Input("cell_gate_bias", BiasType)
+output_gate_bias = Input("output_gate_bias", BiasType)
projection_weights = Input("projection_weights",
- ("TENSOR_QUANT8_SYMM", "{%d,%d}" % (output_size, num_units), 0.00392157, 0))
-projection_bias = Input("projection_bias", "TENSOR_INT32", "{%d}" % (output_size))
-
-output_state_in = Input("output_state_in",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
- 3.05176e-05, 0))
-cell_state_in = Input("cell_state_in",
- ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-
-input_layer_norm_weights = Input("input_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-forget_layer_norm_weights = Input("forget_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-cell_layer_norm_weights = Input("cell_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-output_layer_norm_weights = Input("output_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
+ ("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
+projection_bias = Input("projection_bias", ("TENSOR_INT32", [output_size]))
+
+OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
+CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
+output_state_in = Input("output_state_in", OutputStateType)
+cell_state_in = Input("cell_state_in", CellStateType)
+
+LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
+input_layer_norm_weights = Input("input_layer_norm_weights", LayerNormType)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", LayerNormType)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", LayerNormType)
+output_layer_norm_weights = Input("output_layer_norm_weights", LayerNormType)
cell_clip = Float32Scalar("cell_clip", 0.)
projection_clip = Float32Scalar("projection_clip", 0.)
@@ -94,14 +75,9 @@ output_intermediate_scale = Float32Scalar("output_intermediate_scale", 0.007812)
hidden_state_zero_point = Int32Scalar("hidden_state_zero_point", 0)
hidden_state_scale = Float32Scalar("hidden_state_scale", 0.007)
-output_state_out = Output("output_state_out",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
- 3.05176e-05, 0))
-cell_state_out = Output("cell_state_out",
- ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-output = Output("output",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
- 3.05176e-05, 0))
+output_state_out = Output("output_state_out", OutputStateType)
+cell_state_out = Output("cell_state_out", CellStateType)
+output = Output("output", OutputStateType)
model = model.Operation(
"QUANTIZED_LSTM", input, input_to_input_weights, input_to_forget_weights,
diff --git a/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py b/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
index 07fa1ecec..483602f21 100644
--- a/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
+++ b/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
@@ -24,65 +24,46 @@ input_size = 5
num_units = 4
output_size = 3
-input = Input("input",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, input_size), 0.0078125, 0))
-
-input_to_input_weights = Input("input_to_input_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_forget_weights = Input("input_to_forget_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_cell_weights = Input("input_to_cell_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_output_weights = Input("input_to_output_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-
-recurrent_to_input_weights = Input("recurrent_to_input_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-recurrent_to_output_weights = Input("recurrent_to_output_weights",
- ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
- 0.00784314, 0))
-
-cell_to_input_weights = Input("cell_to_input_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_forget_weights = Input("cell_to_forget_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_output_weights = Input("cell_to_output_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-
-input_gate_bias = Input("input_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-forget_gate_bias = Input("forget_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-cell_gate_bias = Input("cell_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-output_gate_bias = Input("output_gate_bias",
- ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
+InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
+input = Input("input", InputType)
+
+InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
+input_to_input_weights = Input("input_to_input_weights", InputWeightsType)
+input_to_forget_weights = Input("input_to_forget_weights", InputWeightsType)
+input_to_cell_weights = Input("input_to_cell_weights", InputWeightsType)
+input_to_output_weights = Input("input_to_output_weights", InputWeightsType)
+
+RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
+recurrent_to_input_weights = Input("recurrent_to_input_weights", RecurrentWeightsType)
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", RecurrentWeightsType)
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", RecurrentWeightsType)
+recurrent_to_output_weights = Input("recurrent_to_output_weights", RecurrentWeightsType)
+
+CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
+cell_to_input_weights = Input("cell_to_input_weights", CellWeightsType)
+cell_to_forget_weights = Input("cell_to_forget_weights", CellWeightsType)
+cell_to_output_weights = Input("cell_to_output_weights", CellWeightsType)
+
+BiasType = ("TENSOR_INT32", [num_units], 4.65661e-08, 0)
+input_gate_bias = Input("input_gate_bias", BiasType)
+forget_gate_bias = Input("forget_gate_bias", BiasType)
+cell_gate_bias = Input("cell_gate_bias", BiasType)
+output_gate_bias = Input("output_gate_bias", BiasType)
projection_weights = Input("projection_weights",
- ("TENSOR_QUANT8_SYMM", "{%d,%d}" % (output_size, num_units), 0.00392157, 0))
-projection_bias = Input("projection_bias", "TENSOR_INT32", "{%d}" % (output_size))
-
-output_state_in = Input("output_state_in",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
- 3.05176e-05, 0))
-cell_state_in = Input("cell_state_in",
- ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-
-input_layer_norm_weights = Input("input_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-forget_layer_norm_weights = Input("forget_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-cell_layer_norm_weights = Input("cell_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-output_layer_norm_weights = Input("output_layer_norm_weights",
- ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
+ ("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
+projection_bias = Input("projection_bias", ("TENSOR_INT32", [output_size]))
+
+OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
+CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
+output_state_in = Input("output_state_in", OutputStateType)
+cell_state_in = Input("cell_state_in", CellStateType)
+
+LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
+input_layer_norm_weights = Input("input_layer_norm_weights", LayerNormType)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", LayerNormType)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", LayerNormType)
+output_layer_norm_weights = Input("output_layer_norm_weights", LayerNormType)
cell_clip = Float32Scalar("cell_clip", 0.)
projection_clip = Float32Scalar("projection_clip", 0.)
@@ -94,14 +75,9 @@ output_intermediate_scale = Float32Scalar("output_intermediate_scale", 0.007812)
hidden_state_zero_point = Int32Scalar("hidden_state_zero_point", 0)
hidden_state_scale = Float32Scalar("hidden_state_scale", 0.007)
-output_state_out = Output("output_state_out",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
- 3.05176e-05, 0))
-cell_state_out = Output("cell_state_out",
- ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-output = Output("output",
- ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
- 3.05176e-05, 0))
+output_state_out = Output("output_state_out", OutputStateType)
+cell_state_out = Output("cell_state_out", CellStateType)
+output = Output("output", OutputStateType)
model = model.Operation(
"QUANTIZED_LSTM", input, input_to_input_weights, input_to_forget_weights,
diff --git a/nn/runtime/test/specs/V1_3/while_fib.mod.py b/nn/runtime/test/specs/V1_3/while_fib.mod.py
index ab2a5bc0c..9f36b06b2 100644
--- a/nn/runtime/test/specs/V1_3/while_fib.mod.py
+++ b/nn/runtime/test/specs/V1_3/while_fib.mod.py
@@ -23,9 +23,9 @@
# 1 1])
# i = i + 1
-FibType = ["TENSOR_FLOAT32", "{1, 2}"]
-CounterType = ["TENSOR_INT32", "{1}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+FibType = ["TENSOR_FLOAT32", [1, 2]]
+CounterType = ["TENSOR_INT32", [1]]
+BoolType = ["TENSOR_BOOL8", [1]]
def MakeConditionModel():
fib = Input("fib", FibType)
@@ -44,8 +44,8 @@ def MakeBodyModel():
n = Input("n", CounterType)
fib_out = Output("fib_out", FibType)
i_out = Output("i_out", CounterType)
- matrix = Parameter("matrix", ["TENSOR_FLOAT32", "{2, 2}"], [0, 1, 1, 1])
- zero_bias = Parameter("zero_bias", ["TENSOR_FLOAT32", "{2, 1}"], [0, 0])
+ matrix = Parameter("matrix", ["TENSOR_FLOAT32", [2, 2]], [0, 1, 1, 1])
+ zero_bias = Parameter("zero_bias", ["TENSOR_FLOAT32", [2, 1]], [0, 0])
model = Model()
model.IdentifyInputs(fib, i, n)
model.IdentifyOutputs(fib_out, i_out)
diff --git a/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py b/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py
index e322123c4..bae61aa5b 100644
--- a/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py
+++ b/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py
@@ -21,8 +21,8 @@
# while i >= n:
# i = i + 1.0
-CounterType = ["TENSOR_FLOAT32", "{1}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+CounterType = ["TENSOR_FLOAT32", [1]]
+BoolType = ["TENSOR_BOOL8", [1]]
def MakeConditionModel():
i = Input("i", CounterType)
diff --git a/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py b/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py
index 1cb35b4a4..3f8a6344f 100644
--- a/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py
+++ b/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py
@@ -27,9 +27,9 @@
# sum = sum + xi
# i = i + 1
-DataType = ["TENSOR_FLOAT32", "{1, 2}"]
-CounterType = ["TENSOR_INT32", "{1}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+DataType = ["TENSOR_FLOAT32", [1, 2]]
+CounterType = ["TENSOR_INT32", [1]]
+BoolType = ["TENSOR_BOOL8", [1]]
def MakeInnerConditionModel():
xi = Input("xi", DataType)