author     Slava Shklyaev <slavash@google.com>  2020-03-11 14:59:36 +0000
committer  Slava Shklyaev <slavash@google.com>  2020-03-16 10:54:32 +0000
commit     3844591c061d404697d711f84351e4b2ce9cf01a (patch)
tree       9081f99a3e38387d4ab91eb545622d7683792333 /nn/runtime/test/specs
parent     87741faed0bfbf2ff5c754380f3b29c885749799 (diff)
Validate type of dimensions in test generator
This change
- adds "dimensions" type validation in the test generator,
- fixes incorrect usages,
- adds new control flow and QLSTM tests that were previously omitted
due to this bug,
- refactors QLSTM tests for readability and ease of maintenance, and
- updates README to use the new type format.
Fix: 151217992
Test: generate_all_tests.sh
Test: NNT_static
Change-Id: I55b47a7ff02cb5ec25150d6be322ee3dbe9dcf43
Merged-In: I55b47a7ff02cb5ec25150d6be322ee3dbe9dcf43
(cherry picked from commit 85f3703a1b159b88bbb9ac39993eba4658dc4fda)
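
For context, the "dimensions" entry of an operand type is what the new
validation targets: specs previously wrote shapes as strings such as
"{3, 4}", which the generator silently mishandled (the control flow and
QLSTM tests mentioned above were dropped from generation for this reason);
the new format is a plain list of ints. A minimal sketch of this kind of
check follows, assuming a hypothetical check_dimensions helper -- this is
illustrative, not the actual test-generator code:

    # Minimal sketch only -- the helper name and error text are assumptions,
    # not the real NNAPI test_generator code.
    def check_dimensions(dimensions):
        if not isinstance(dimensions, (list, tuple)) or not all(
                isinstance(d, int) for d in dimensions):
            raise TypeError("dimensions must be a list of ints, e.g. [3, 4]; "
                            "got %r" % (dimensions,))

    check_dimensions([3, 4])       # new list format: accepted
    # check_dimensions("{3, 4}")   # old string format: would raise TypeError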
Diffstat (limited to 'nn/runtime/test/specs')
-rw-r--r--   nn/runtime/test/specs/V1_2/quantized_lstm.mod.py       | 141
-rw-r--r--   nn/runtime/test/specs/V1_3/if_constant.mod.py          |   4
-rw-r--r--   nn/runtime/test/specs/V1_3/if_simple.mod.py            |   4
-rw-r--r--   nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py   | 106
-rw-r--r--   nn/runtime/test/specs/V1_3/qlstm_projection.mod.py     | 106
-rw-r--r--   nn/runtime/test/specs/V1_3/while_fib.mod.py            |  10
-rw-r--r--   nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py  |   4
-rw-r--r--   nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py  |   6
8 files changed, 156 insertions, 225 deletions
diff --git a/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py b/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py
index 5fd4c7a84..538bc7d61 100644
--- a/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py
+++ b/nn/runtime/test/specs/V1_2/quantized_lstm.mod.py
@@ -22,31 +22,39 @@
 n_input = 2
 n_cell = 4
 n_output = n_cell
 
-input_ = Input("input", ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))
+InputType = ("TENSOR_QUANT8_ASYMM", [n_batch, n_input], 1 / 128, 128)
+input_ = Input("input", InputType)
 
 weights_scale = 0.00408021
 weights_zero_point = 100
 
-input_to_input_weights = Input("inputToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
-input_to_forget_weights = Input("inputToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
-input_to_cell_weights = Input("inputToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
-input_to_output_weights = Input("inputToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+InputWeightsType = ("TENSOR_QUANT8_ASYMM",
+                    [n_output, n_input], weights_scale, weights_zero_point)
+input_to_input_weights = Input("inputToInputWeights", InputWeightsType)
+input_to_forget_weights = Input("inputToForgetWeights", InputWeightsType)
+input_to_cell_weights = Input("inputToCellWeights", InputWeightsType)
+input_to_output_weights = Input("inputToOutputWeights", InputWeightsType)
 
-recurrent_to_input_weights = Input("recurrentToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
-recurrent_to_forget_weights = Input("recurrentToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
-recurrent_to_cell_weights = Input("recurrentToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
-recurrent_to_output_weights = Input("recurrentToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
+RecurrentWeightsType = ("TENSOR_QUANT8_ASYMM",
+                        [n_output, n_output], weights_scale, weights_zero_point)
+recurrent_to_input_weights = Input("recurrentToInputWeights", RecurrentWeightsType)
+recurrent_to_forget_weights = Input("recurrentToForgetWeights", RecurrentWeightsType)
+recurrent_to_cell_weights = Input("recurrentToCellWeights", RecurrentWeightsType)
+recurrent_to_output_weights = Input("recurrentToOutputWeights", RecurrentWeightsType)
 
-input_gate_bias = Input("inputGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
-forget_gate_bias = Input("forgetGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
-cell_gate_bias = Input("cellGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
-output_gate_bias = Input("outputGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
+BiasType = ("TENSOR_INT32", [n_output], weights_scale / 128., 0)
+input_gate_bias = Input("inputGateBias", BiasType)
+forget_gate_bias = Input("forgetGateBias", BiasType)
+cell_gate_bias = Input("cellGateBias", BiasType)
+output_gate_bias = Input("outputGateBias", BiasType)
 
-prev_cell_state = Input("prevCellState", ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
-prev_output = Input("prevOutput", ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+StateType = ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0)
+OutputType = ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128)
+prev_cell_state = Input("prevCellState", StateType)
+prev_output = Input("prevOutput", OutputType)
 
-cell_state_out = Output("cellStateOut", ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
-output = Output("output", ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+cell_state_out = Output("cellStateOut", StateType)
+output = Output("output", OutputType)
 
 model = model.Operation("QUANTIZED_16BIT_LSTM",
@@ -101,80 +109,51 @@
 n_input = 2
 n_cell = 4
 n_output = n_cell
 
-input_ = Input("input",
-               ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))
+InputType = ("TENSOR_QUANT8_ASYMM", [n_batch, n_input], 1 / 128, 128)
+input_ = Input("input", InputType)
 
 weights_scale = 0.00408021
 weights_zero_point = 100
 
-input_to_input_weights = Parameter(
-    "inputToInputWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_input), weights_scale, weights_zero_point),
-    [146, 250, 235, 171, 10, 218, 171, 108])
-input_to_forget_weights = Parameter(
-    "inputToForgetWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_input), weights_scale, weights_zero_point),
-    [24, 50, 132, 179, 158, 110, 3, 169])
-input_to_cell_weights = Parameter(
-    "inputToCellWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_input), weights_scale, weights_zero_point),
-    [133, 34, 29, 49, 206, 109, 54, 183])
-input_to_output_weights = Parameter(
-    "inputToOutputWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_input), weights_scale, weights_zero_point),
-    [195, 187, 11, 99, 109, 10, 218, 48])
-
-recurrent_to_input_weights = Parameter(
-    "recurrentToInputWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_output), weights_scale, weights_zero_point),
-    [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26])
-recurrent_to_forget_weights = Parameter(
-    "recurrentToForgetWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_output), weights_scale, weights_zero_point),
-    [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253])
-recurrent_to_cell_weights = Parameter(
-    "recurrentToCellWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_output), weights_scale, weights_zero_point),
-    [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216])
-recurrent_to_output_weights = Parameter(
-    "recurrentToOutputWeights",
-    ("TENSOR_QUANT8_ASYMM",
-     (n_output, n_output), weights_scale, weights_zero_point),
-    [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98])
-
-input_gate_bias = Parameter("inputGateBias",
-                            ("TENSOR_INT32",
-                             (n_output,), weights_scale / 128., 0),
+InputWeightsType = ("TENSOR_QUANT8_ASYMM",
+                    [n_output, n_input], weights_scale, weights_zero_point)
+input_to_input_weights = Parameter("inputToInputWeights", InputWeightsType,
+                                   [146, 250, 235, 171, 10, 218, 171, 108])
+input_to_forget_weights = Parameter("inputToForgetWeights", InputWeightsType,
+                                    [24, 50, 132, 179, 158, 110, 3, 169])
+input_to_cell_weights = Parameter("inputToCellWeights", InputWeightsType,
+                                  [133, 34, 29, 49, 206, 109, 54, 183])
+input_to_output_weights = Parameter("inputToOutputWeights", InputWeightsType,
+                                    [195, 187, 11, 99, 109, 10, 218, 48])
+
+RecurrentWeightsType = ("TENSOR_QUANT8_ASYMM",
+                        [n_output, n_output], weights_scale, weights_zero_point)
+recurrent_to_input_weights = Parameter("recurrentToInputWeights", RecurrentWeightsType,
+                                       [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26])
+recurrent_to_forget_weights = Parameter("recurrentToForgetWeights", RecurrentWeightsType,
+                                        [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253])
+recurrent_to_cell_weights = Parameter("recurrentToCellWeights", RecurrentWeightsType,
+                                      [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216])
+recurrent_to_output_weights = Parameter("recurrentToOutputWeights", RecurrentWeightsType,
+                                        [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98])
+
+BiasType = ("TENSOR_INT32", [n_output], weights_scale / 128., 0)
+input_gate_bias = Parameter("inputGateBias", BiasType,
                             [-7876, 13488, -726, 32839])
-forget_gate_bias = Parameter("forgetGateBias",
-                             ("TENSOR_INT32",
-                              (n_output,), weights_scale / 128., 0),
+forget_gate_bias = Parameter("forgetGateBias", BiasType,
                              [9206, -46884, -11693, -38724])
-cell_gate_bias = Parameter("cellGateBias",
-                           ("TENSOR_INT32",
-                            (n_output,), weights_scale / 128., 0),
+cell_gate_bias = Parameter("cellGateBias", BiasType,
                            [39481, 48624, 48976, -21419])
-output_gate_bias = Parameter("outputGateBias",
-                             ("TENSOR_INT32",
-                              (n_output,), weights_scale / 128., 0),
+output_gate_bias = Parameter("outputGateBias", BiasType,
                              [-58999, -17050, -41852, -40538])
 
-prev_cell_state = Input("prevCellState",
-                        ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
-prev_output = Input("prevOutput",
-                    ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+StateType = ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0)
+OutputType = ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128)
+prev_cell_state = Input("prevCellState", StateType)
+prev_output = Input("prevOutput", OutputType)
 
-cell_state_out = Output("cellStateOut",
-                        ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
-output = Output("output",
-                ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+cell_state_out = Output("cellStateOut", StateType)
+output = Output("output", OutputType)
 
 model = model.Operation("QUANTIZED_16BIT_LSTM",
                         input_, input_to_input_weights, input_to_forget_weights, input_to_cell_weights,
diff --git a/nn/runtime/test/specs/V1_3/if_constant.mod.py b/nn/runtime/test/specs/V1_3/if_constant.mod.py
index badf4476b..ae6750146 100644
--- a/nn/runtime/test/specs/V1_3/if_constant.mod.py
+++ b/nn/runtime/test/specs/V1_3/if_constant.mod.py
@@ -24,8 +24,8 @@ output_data = {
     False: [x - y for (x, y) in zip(x_data, y_data)],
 }
 
-ValueType = ["TENSOR_FLOAT32", "{3, 4}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+ValueType = ["TENSOR_FLOAT32", [3, 4]]
+BoolType = ["TENSOR_BOOL8", [1]]
 
 def MakeBranchModel(operation_name):
   x = Input("x", ValueType)
diff --git a/nn/runtime/test/specs/V1_3/if_simple.mod.py b/nn/runtime/test/specs/V1_3/if_simple.mod.py
index 384bc2cf7..c13446588 100644
--- a/nn/runtime/test/specs/V1_3/if_simple.mod.py
+++ b/nn/runtime/test/specs/V1_3/if_simple.mod.py
@@ -20,8 +20,8 @@ input_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
 output_add = [y + 100 for y in input_data]
 output_sub = [y - 100 for y in input_data]
 
-ValueType = ["TENSOR_FLOAT32", "{3, 4}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+ValueType = ["TENSOR_FLOAT32", [3, 4]]
+BoolType = ["TENSOR_BOOL8", [1]]
 
 def MakeBranchModel(operation_name):
   y = Input("y", ValueType)
diff --git a/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py b/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
index 41704f83b..18300a900 100644
--- a/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
+++ b/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
@@ -24,65 +24,46 @@
 input_size = 5
 num_units = 4
 output_size = 4
 
-input = Input("input",
-              ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, input_size), 0.0078125, 0))
-
-input_to_input_weights = Input("input_to_input_weights",
-                               ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_forget_weights = Input("input_to_forget_weights",
-                                ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_cell_weights = Input("input_to_cell_weights",
-                              ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_output_weights = Input("input_to_output_weights",
-                                ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-
-recurrent_to_input_weights = Input("recurrent_to_input_weights",
-                                   ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                    0.00784314, 0))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
-                                    ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                     0.00784314, 0))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights",
-                                  ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                   0.00784314, 0))
-recurrent_to_output_weights = Input("recurrent_to_output_weights",
-                                    ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                     0.00784314, 0))
-
-cell_to_input_weights = Input("cell_to_input_weights",
-                              ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_forget_weights = Input("cell_to_forget_weights",
-                               ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_output_weights = Input("cell_to_output_weights",
-                               ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-
-input_gate_bias = Input("input_gate_bias",
-                        ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-forget_gate_bias = Input("forget_gate_bias",
-                         ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-cell_gate_bias = Input("cell_gate_bias",
-                       ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-output_gate_bias = Input("output_gate_bias",
-                         ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
+InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
+input = Input("input", InputType)
+
+InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
+input_to_input_weights = Input("input_to_input_weights", InputWeightsType)
+input_to_forget_weights = Input("input_to_forget_weights", InputWeightsType)
+input_to_cell_weights = Input("input_to_cell_weights", InputWeightsType)
+input_to_output_weights = Input("input_to_output_weights", InputWeightsType)
+
+RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
+recurrent_to_input_weights = Input("recurrent_to_input_weights", RecurrentWeightsType)
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", RecurrentWeightsType)
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", RecurrentWeightsType)
+recurrent_to_output_weights = Input("recurrent_to_output_weights", RecurrentWeightsType)
+
+CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
+cell_to_input_weights = Input("cell_to_input_weights", CellWeightsType)
+cell_to_forget_weights = Input("cell_to_forget_weights", CellWeightsType)
+cell_to_output_weights = Input("cell_to_output_weights", CellWeightsType)
+
+BiasType = ("TENSOR_INT32", [num_units], 4.65661e-08, 0)
+input_gate_bias = Input("input_gate_bias", BiasType)
+forget_gate_bias = Input("forget_gate_bias", BiasType)
+cell_gate_bias = Input("cell_gate_bias", BiasType)
+output_gate_bias = Input("output_gate_bias", BiasType)
 
 projection_weights = Input("projection_weights",
-                           ("TENSOR_QUANT8_SYMM", "{%d,%d}" % (output_size, num_units), 0.00392157, 0))
-projection_bias = Input("projection_bias", "TENSOR_INT32", "{%d}" % (output_size))
-
-output_state_in = Input("output_state_in",
-                        ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
-                         3.05176e-05, 0))
-cell_state_in = Input("cell_state_in",
-                      ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-
-input_layer_norm_weights = Input("input_layer_norm_weights",
-                                 ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-forget_layer_norm_weights = Input("forget_layer_norm_weights",
-                                  ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-cell_layer_norm_weights = Input("cell_layer_norm_weights",
-                                ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-output_layer_norm_weights = Input("output_layer_norm_weights",
-                                  ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
+                           ("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
+projection_bias = Input("projection_bias", ("TENSOR_INT32", [output_size]))
+
+OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
+CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
+output_state_in = Input("output_state_in", OutputStateType)
+cell_state_in = Input("cell_state_in", CellStateType)
+
+LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
+input_layer_norm_weights = Input("input_layer_norm_weights", LayerNormType)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", LayerNormType)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", LayerNormType)
+output_layer_norm_weights = Input("output_layer_norm_weights", LayerNormType)
 
 cell_clip = Float32Scalar("cell_clip", 0.)
 projection_clip = Float32Scalar("projection_clip", 0.)
@@ -94,14 +75,9 @@ output_intermediate_scale = Float32Scalar("output_intermediate_scale", 0.007812)
 hidden_state_zero_point = Int32Scalar("hidden_state_zero_point", 0)
 hidden_state_scale = Float32Scalar("hidden_state_scale", 0.007)
 
-output_state_out = Output("output_state_out",
-                          ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
-                           3.05176e-05, 0))
-cell_state_out = Output("cell_state_out",
-                        ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-output = Output("output",
-                ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
-                 3.05176e-05, 0))
+output_state_out = Output("output_state_out", OutputStateType)
+cell_state_out = Output("cell_state_out", CellStateType)
+output = Output("output", OutputStateType)
 
 model = model.Operation(
     "QUANTIZED_LSTM", input, input_to_input_weights, input_to_forget_weights,
diff --git a/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py b/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
index 07fa1ecec..483602f21 100644
--- a/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
+++ b/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
@@ -24,65 +24,46 @@
 input_size = 5
 num_units = 4
 output_size = 3
 
-input = Input("input",
-              ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, input_size), 0.0078125, 0))
-
-input_to_input_weights = Input("input_to_input_weights",
-                               ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_forget_weights = Input("input_to_forget_weights",
-                                ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_cell_weights = Input("input_to_cell_weights",
-                              ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-input_to_output_weights = Input("input_to_output_weights",
-                                ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, input_size), 0.00784314, 0))
-
-recurrent_to_input_weights = Input("recurrent_to_input_weights",
-                                   ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                    0.00784314, 0))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
-                                    ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                     0.00784314, 0))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights",
-                                  ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                   0.00784314, 0))
-recurrent_to_output_weights = Input("recurrent_to_output_weights",
-                                    ("TENSOR_QUANT8_SYMM", "{%d, %d}" % (num_units, output_size),
-                                     0.00784314, 0))
-
-cell_to_input_weights = Input("cell_to_input_weights",
-                              ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_forget_weights = Input("cell_to_forget_weights",
-                               ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-cell_to_output_weights = Input("cell_to_output_weights",
-                               ("TENSOR_QUANT16_SYMM", "{%d}" % (num_units), 1.0, 0))
-
-input_gate_bias = Input("input_gate_bias",
-                        ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-forget_gate_bias = Input("forget_gate_bias",
-                         ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-cell_gate_bias = Input("cell_gate_bias",
-                       ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
-output_gate_bias = Input("output_gate_bias",
-                         ("TENSOR_INT32", "{%d}" % (num_units), 4.65661e-08, 0))
+InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
+input = Input("input", InputType)
+
+InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
+input_to_input_weights = Input("input_to_input_weights", InputWeightsType)
+input_to_forget_weights = Input("input_to_forget_weights", InputWeightsType)
+input_to_cell_weights = Input("input_to_cell_weights", InputWeightsType)
+input_to_output_weights = Input("input_to_output_weights", InputWeightsType)
+
+RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
+recurrent_to_input_weights = Input("recurrent_to_input_weights", RecurrentWeightsType)
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", RecurrentWeightsType)
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", RecurrentWeightsType)
+recurrent_to_output_weights = Input("recurrent_to_output_weights", RecurrentWeightsType)
+
+CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
+cell_to_input_weights = Input("cell_to_input_weights", CellWeightsType)
+cell_to_forget_weights = Input("cell_to_forget_weights", CellWeightsType)
+cell_to_output_weights = Input("cell_to_output_weights", CellWeightsType)
+
+BiasType = ("TENSOR_INT32", [num_units], 4.65661e-08, 0)
+input_gate_bias = Input("input_gate_bias", BiasType)
+forget_gate_bias = Input("forget_gate_bias", BiasType)
+cell_gate_bias = Input("cell_gate_bias", BiasType)
+output_gate_bias = Input("output_gate_bias", BiasType)
 
 projection_weights = Input("projection_weights",
-                           ("TENSOR_QUANT8_SYMM", "{%d,%d}" % (output_size, num_units), 0.00392157, 0))
-projection_bias = Input("projection_bias", "TENSOR_INT32", "{%d}" % (output_size))
-
-output_state_in = Input("output_state_in",
-                        ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
-                         3.05176e-05, 0))
-cell_state_in = Input("cell_state_in",
-                      ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-
-input_layer_norm_weights = Input("input_layer_norm_weights",
-                                 ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-forget_layer_norm_weights = Input("forget_layer_norm_weights",
-                                  ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-cell_layer_norm_weights = Input("cell_layer_norm_weights",
-                                ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
-output_layer_norm_weights = Input("output_layer_norm_weights",
-                                  ("TENSOR_QUANT16_SYMM", "{%d}" % num_units, 3.05182e-05, 0))
+                           ("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
+projection_bias = Input("projection_bias", ("TENSOR_INT32", [output_size]))
+
+OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
+CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
+output_state_in = Input("output_state_in", OutputStateType)
+cell_state_in = Input("cell_state_in", CellStateType)
+
+LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
+input_layer_norm_weights = Input("input_layer_norm_weights", LayerNormType)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", LayerNormType)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", LayerNormType)
+output_layer_norm_weights = Input("output_layer_norm_weights", LayerNormType)
 
 cell_clip = Float32Scalar("cell_clip", 0.)
 projection_clip = Float32Scalar("projection_clip", 0.)
@@ -94,14 +75,9 @@ output_intermediate_scale = Float32Scalar("output_intermediate_scale", 0.007812)
 hidden_state_zero_point = Int32Scalar("hidden_state_zero_point", 0)
 hidden_state_scale = Float32Scalar("hidden_state_scale", 0.007)
 
-output_state_out = Output("output_state_out",
-                          ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
-                           3.05176e-05, 0))
-cell_state_out = Output("cell_state_out",
-                        ("TENSOR_QUANT16_SYMM", "{%d, %d}" % (batch_size, num_units), 3.05176e-05, 0))
-output = Output("output",
-                ("TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d}" % (batch_size, output_size),
-                 3.05176e-05, 0))
+output_state_out = Output("output_state_out", OutputStateType)
+cell_state_out = Output("cell_state_out", CellStateType)
+output = Output("output", OutputStateType)
 
 model = model.Operation(
     "QUANTIZED_LSTM", input, input_to_input_weights, input_to_forget_weights,
diff --git a/nn/runtime/test/specs/V1_3/while_fib.mod.py b/nn/runtime/test/specs/V1_3/while_fib.mod.py
index ab2a5bc0c..9f36b06b2 100644
--- a/nn/runtime/test/specs/V1_3/while_fib.mod.py
+++ b/nn/runtime/test/specs/V1_3/while_fib.mod.py
@@ -23,9 +23,9 @@
 #                      1 1])
 #   i = i + 1
 
-FibType = ["TENSOR_FLOAT32", "{1, 2}"]
-CounterType = ["TENSOR_INT32", "{1}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+FibType = ["TENSOR_FLOAT32", [1, 2]]
+CounterType = ["TENSOR_INT32", [1]]
+BoolType = ["TENSOR_BOOL8", [1]]
 
 def MakeConditionModel():
   fib = Input("fib", FibType)
@@ -44,8 +44,8 @@ def MakeBodyModel():
   n = Input("n", CounterType)
   fib_out = Output("fib_out", FibType)
   i_out = Output("i_out", CounterType)
-  matrix = Parameter("matrix", ["TENSOR_FLOAT32", "{2, 2}"], [0, 1, 1, 1])
-  zero_bias = Parameter("zero_bias", ["TENSOR_FLOAT32", "{2, 1}"], [0, 0])
+  matrix = Parameter("matrix", ["TENSOR_FLOAT32", [2, 2]], [0, 1, 1, 1])
+  zero_bias = Parameter("zero_bias", ["TENSOR_FLOAT32", [2, 1]], [0, 0])
   model = Model()
   model.IdentifyInputs(fib, i, n)
   model.IdentifyOutputs(fib_out, i_out)
diff --git a/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py b/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py
index e322123c4..bae61aa5b 100644
--- a/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py
+++ b/nn/runtime/test/specs/V1_3/while_infinite_loop.mod.py
@@ -21,8 +21,8 @@
 # while i >= n:
 #   i = i + 1.0
 
-CounterType = ["TENSOR_FLOAT32", "{1}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+CounterType = ["TENSOR_FLOAT32", [1]]
+BoolType = ["TENSOR_BOOL8", [1]]
 
 def MakeConditionModel():
  i = Input("i", CounterType)
diff --git a/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py b/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py
index 1cb35b4a4..3f8a6344f 100644
--- a/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py
+++ b/nn/runtime/test/specs/V1_3/while_sum_of_powers.mod.py
@@ -27,9 +27,9 @@
 #   sum = sum + xi
 #   i = i + 1
 
-DataType = ["TENSOR_FLOAT32", "{1, 2}"]
-CounterType = ["TENSOR_INT32", "{1}"]
-BoolType = ["TENSOR_BOOL8", "{1}"]
+DataType = ["TENSOR_FLOAT32", [1, 2]]
+CounterType = ["TENSOR_INT32", [1]]
+BoolType = ["TENSOR_BOOL8", [1]]
 
 def MakeInnerConditionModel():
   xi = Input("xi", DataType)