author    Lev Proleev <levp@google.com>    2020-02-12 11:12:57 +0000
committer Android (Google) Code Review <android-gerrit@google.com>    2020-02-12 11:12:57 +0000
commit    318cdef7711eba97b89030676ebbfee099875827 (patch)
tree      7020046e488f6a3282e9946feb197e6c164ba27b /nn/runtime/test/specs/V1_3
parent    29ff52aa05eeb6c61c3c4c6b48b8c031bfb56434 (diff)
parent    7aee1ca40e9e0c18ad7a31c5ac3f5f056da44a29 (diff)
download  ml-318cdef7711eba97b89030676ebbfee099875827.tar.gz
Merge changes from topic "state_outputs"
* changes:
  Add a state output for BIDIRECTIONAL_SEQUENCE_RNN
  Add a state output for BIDIRECTIONAL_SEQUENCE_LSTM
  Add a state output for UNIDIRECTIONAL_SEQUENCE_LSTM
  Add a state output for UNIDIRECTIONAL_SEQUENCE_RNN
Diffstat (limited to 'nn/runtime/test/specs/V1_3')
-rw-r--r-- nn/runtime/test/specs/V1_3/bidirectional_sequence_lstm_state_output.mod.py | 494
-rw-r--r-- nn/runtime/test/specs/V1_3/bidirectional_sequence_rnn_state_output.mod.py | 585
-rw-r--r-- nn/runtime/test/specs/V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.mod.py | 192
-rw-r--r-- nn/runtime/test/specs/V1_3/unidirectional_sequence_rnn.mod.py | 229
4 files changed, 1500 insertions, 0 deletions
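All four new specs follow the same pattern: the operation's final state tensors (activation and cell state for the LSTMs, hidden state for the RNNs) are declared as additional model Outputs and checked against golden data sliced from the existing golden outputs.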
diff --git a/nn/runtime/test/specs/V1_3/bidirectional_sequence_lstm_state_output.mod.py b/nn/runtime/test/specs/V1_3/bidirectional_sequence_lstm_state_output.mod.py
new file mode 100644
index 000000000..73b004e6e
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/bidirectional_sequence_lstm_state_output.mod.py
@@ -0,0 +1,494 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT32, No Layer Normalization, No Cifg, No Peephole, No Projection, and No Clipping.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+ "fw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+ "bw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+bw_output=Output("bw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+
+fw_output_activation_state = Output("fw_output_activation_state",
+ "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(n_batch, n_output))
+fw_output_cell_state = Output("fw_output_cell_state", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(n_batch, n_cell))
+bw_output_activation_state = Output("bw_output_activation_state",
+ "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(n_batch, n_output))
+bw_output_cell_state = Output("bw_output_cell_state", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(n_batch, n_cell))
+
+
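+# Builds a BIDIRECTIONAL_SEQUENCE_LSTM model over the operands above and packs
+# the supplied data into an Example for one named test case.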
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[],
+ bw_output_data=[],
+ fw_output_activation_state_data=[],
+ fw_output_cell_state_data=[],
+ bw_output_activation_state_data=[],
+ bw_output_cell_state_data=[]):
+
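+ # Fixed operation parameters: activation 4 selects tanh, cell/projection
+ # clipping is disabled, fw/bw outputs are kept separate, and the input is
+ # laid out time-major.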
+ activation = Int32Scalar("activation", 4)
+ cell_clip = Float32Scalar("cell_clip", 0.0)
+ proj_clip = Float32Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", False)
+ time_major = BoolScalar("time_major", True)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation,
+ cell_clip,
+ proj_clip,
+ merge_outputs,
+ time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,
+ ).To(fw_output, bw_output, fw_output_activation_state, fw_output_cell_state,
+ bw_output_activation_state, bw_output_cell_state)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ bw_output: bw_output_data,
+ fw_output_activation_state: fw_output_activation_state_data,
+ fw_output_cell_state: fw_output_cell_state_data,
+ bw_output_activation_state: bw_output_activation_state_data,
+ bw_output_cell_state: bw_output_cell_state_data,
+ },
+ model=model, name=name)
+
+
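+# Test data; the backward direction reuses the forward weight and bias values.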
+fw_input_to_input_weights_data = [
+ -0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524
+]
+bw_input_to_input_weights_data = fw_input_to_input_weights_data
+
+fw_input_to_forget_weights_data = [
+ 0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113,
+ -0.29909778
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ -0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077, -0.1556896,
+ 0.19487578
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_input_weights_data = [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296
+]
+bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ -0.02973187, 0.1229473, 0.20885126, -0.15358765,
+ -0.03716109, 0.12507336, 0.41193449, -0.20860538,
+ -0.15053082, 0.09120187, 0.24278517, -0.12222792
+]
+bw_golden_output_data = [
+ -0.0806187, 0.139077, 0.400476, -0.197842,
+ -0.0332076, 0.123838, 0.309777, -0.17621,
+ -0.0490733, 0.0739237, 0.067706, -0.0208124
+]
+
+fw_output_cell_state_data = [-0.41584, 0.1496, 0.407424, -0.252775]
+bw_output_cell_state_data = [-0.402085, 0.178675, 0.610687, -0.373812]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_input_weights_data=fw_input_to_input_weights_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_input_gate_bias_data=fw_input_gate_bias_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_input_weights_data=bw_input_to_input_weights_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_input_gate_bias_data=bw_input_gate_bias_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data=fw_activation_state_data,
+ bw_activation_state_data=bw_activation_state_data,
+ fw_cell_state_data=fw_cell_state_data,
+ bw_cell_state_data=bw_cell_state_data,
+ fw_output_data=fw_golden_output_data,
+ bw_output_data=bw_golden_output_data,
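+ # Expected state outputs: the last time step of the forward golden output
+ # and the first time step of the backward golden output.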
+ fw_output_activation_state_data=fw_golden_output_data[2 * (n_batch * n_output):],
+ fw_output_cell_state_data=fw_output_cell_state_data,
+ bw_output_activation_state_data=bw_golden_output_data[:(n_batch * n_output)],
+ bw_output_cell_state_data=bw_output_cell_state_data,
+)
diff --git a/nn/runtime/test/specs/V1_3/bidirectional_sequence_rnn_state_output.mod.py b/nn/runtime/test/specs/V1_3/bidirectional_sequence_rnn_state_output.mod.py
new file mode 100644
index 000000000..7c1140f7a
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/bidirectional_sequence_rnn_state_output.mod.py
@@ -0,0 +1,585 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+
+
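+# Helpers for rearranging golden data between test variants: batch-major <->
+# time-major conversion, merging fw/bw outputs along the feature axis,
+# reversing the time axis, and splitting a tensor along its last axis.
+# For example, convert_to_time_major([1, 2, 3, 4], [2, 2, 1]) == [1, 3, 2, 4]
+# and split_tensor_in_two([1, 2, 3, 4], [2, 2]) == ([1, 3], [2, 4]).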
+def convert_to_time_major(tensor, tensor_shape):
+ return np.array(tensor).reshape(tensor_shape).transpose(
+ [1, 0, 2]).flatten().tolist()
+
+
+def merge_outputs(a, a_shape, b, b_shape):
+ a = np.array(a).reshape(a_shape)
+ b = np.array(b).reshape(b_shape)
+ return np.concatenate((a, b), axis=2).flatten().tolist()
+
+
+def reverse_batch_major(tensor, tensor_shape):
+ return np.array(tensor).reshape(tensor_shape)[:, ::-1, :].flatten().tolist()
+
+
+def split_tensor_in_two(tensor, tensor_shape):
+ tensor = np.array(tensor).reshape(tensor_shape)
+ left, right = np.split(tensor, 2, axis=len(tensor_shape) - 1)
+ return left.flatten().tolist(), right.flatten().tolist()
+
+
+def test(
+ name, input, fw_weights, fw_recurrent_weights, fw_bias, fw_hidden_state,
+ bw_weights, bw_recurrent_weights, bw_bias, bw_hidden_state, aux_input,
+ fw_aux_weights, bw_aux_weights, activation, time_major, merge_outputs,
+ fw_output, bw_output, fw_output_hidden_state, bw_output_hidden_state,
+ input_data, fw_weights_data, fw_recurrent_weights_data, fw_bias_data,
+ fw_hidden_state_data, bw_weights_data, bw_recurrent_weights_data,
+ bw_bias_data, bw_hidden_state_data, aux_input_data, fw_aux_weights_data,
+ bw_aux_weights_data, fw_output_data, bw_output_data,
+ fw_output_hidden_state_data, bw_output_hidden_state_data):
+ activation = Int32Scalar("activation", activation)
+ time_major = BoolScalar("time_major", time_major)
+ merge_outputs_scalar = BoolScalar("merge_outputs", merge_outputs)
+ model = Model().Operation("BIDIRECTIONAL_SEQUENCE_RNN", input, fw_weights,
+ fw_recurrent_weights, fw_bias, fw_hidden_state,
+ bw_weights, bw_recurrent_weights, bw_bias,
+ bw_hidden_state, aux_input, fw_aux_weights,
+ bw_aux_weights, activation, time_major,
+ merge_outputs_scalar)
+ if merge_outputs:
+ model = model.To(fw_output, fw_output_hidden_state, bw_output_hidden_state)
+ else:
+ model = model.To(fw_output, bw_output, fw_output_hidden_state,
+ bw_output_hidden_state)
+
+ data_dict = {
+ input: input_data,
+ fw_weights: fw_weights_data,
+ fw_recurrent_weights: fw_recurrent_weights_data,
+ fw_bias: fw_bias_data,
+ fw_hidden_state: fw_hidden_state_data,
+ bw_weights: bw_weights_data,
+ bw_recurrent_weights: bw_recurrent_weights_data,
+ bw_bias: bw_bias_data,
+ bw_hidden_state: bw_hidden_state_data,
+ aux_input: aux_input_data,
+ fw_aux_weights: fw_aux_weights_data,
+ bw_aux_weights: bw_aux_weights_data,
+ fw_output: fw_output_data,
+ fw_output_hidden_state: fw_output_hidden_state_data,
+ bw_output_hidden_state: bw_output_hidden_state_data,
+ }
+ if not merge_outputs:
+ data_dict[bw_output] = bw_output_data
+
+ example = Example(
+ data_dict, model=model, name=name).AddVariations("relaxed", "float16")
+
+
+num_batches = 2
+max_time = 16
+input_size = 8
+fw_num_units = 16
+bw_num_units = 16
+
+input_data = [
+ 0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133, 0.43773448,
+ 0.60379338, 0.35562468, -0.69424844, -0.93421471, -0.87287879, 0.37144363,
+ -0.62476718, 0.23791671, 0.40060222, 0.1356622, -0.99774903, -0.98858172,
+ -0.38952237, -0.47685933, 0.31073618, 0.71511042, -0.63767755, -0.31729108,
+ 0.33468103, 0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
+ -0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007, -0.61777675,
+ -0.21095741, 0.41213346, 0.73784804, 0.094794154, 0.47791874, 0.86496925,
+ -0.53376222, 0.85315156, 0.10288584, 0.86684, -0.011186242, 0.10513687,
+ 0.87825835, 0.59929144, 0.62827742, 0.18899453, 0.31440187, 0.99059987,
+ 0.87170351, -0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
+ 0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567, -0.66609079,
+ 0.59098077, 0.73017097, 0.74604273, 0.32882881, -0.17503482, 0.22396147,
+ 0.19379807, 0.29120302, 0.077113032, -0.70331609, 0.15804303, -0.93407321,
+ 0.40182066, 0.036301374, 0.66521823, 0.0300982, -0.7747041, -0.02038002,
+ 0.020698071, -0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
+ -0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682, 0.43519354,
+ 0.14744234, 0.62589407, 0.1653645, -0.10651493, -0.045277178, 0.99032974,
+ -0.88255352, -0.85147917, 0.28153265, 0.19455957, -0.55479527, -0.56042433,
+ 0.26048636, 0.84702539, 0.47587705, -0.074295521, -0.12287641, 0.70117295,
+ 0.90532446, 0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
+ -0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563, 0.93455386,
+ -0.6324693, -0.083922029
+] * 2
+
+weights_data = [
+ 0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346, 0.317493,
+ 0.969689, -0.343251, 0.186423, 0.398151, 0.152399, 0.448504, 0.317662,
+ 0.523556, -0.323514, 0.480877, 0.333113, -0.757714, -0.674487, -0.643585,
+ 0.217766, -0.0251462, 0.79512, -0.595574, -0.422444, 0.371572, -0.452178,
+ -0.556069, -0.482188, -0.685456, -0.727851, 0.841829, 0.551535, -0.232336,
+ 0.729158, -0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
+ 0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183, 0.306261,
+ -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303, 0.0354295, 0.566564,
+ -0.485469, -0.620498, 0.832546, 0.697884, -0.279115, 0.294415, -0.584313,
+ 0.548772, 0.0648819, 0.968726, 0.723834, -0.0080452, -0.350386, -0.272803,
+ 0.115121, -0.412644, -0.824713, -0.992843, -0.592904, -0.417893, 0.863791,
+ -0.423461, -0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
+ 0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042, 0.0960841,
+ 0.368357, 0.244191, -0.817703, -0.211223, 0.442012, 0.37225, -0.623598,
+ -0.405423, 0.455101, 0.673656, -0.145345, -0.511346, -0.901675, -0.81252,
+ -0.127006, 0.809865, -0.721884, 0.636255, 0.868989, -0.347973, -0.10179,
+ -0.777449, 0.917274, 0.819286, 0.206218, -0.00785118, 0.167141, 0.45872,
+ 0.972934, -0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
+ 0.277308, 0.415818
+]
+
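+# 16x16 recurrent weights: 0.1 on the diagonal, i.e. 0.1 * identity.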
+recurrent_weights_data = [
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1
+]
+
+bias_data = [
+ 0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
+ -0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
+ 0.37197268, 0.61957061, 0.3956964, -0.37609905
+]
+
+fw_output_data = [
+ 0.496726, 0, 0.965996, 0, 0.0584254, 0, 0, 0.12315, 0, 0, 0.612266,
+ 0.456601, 0, 0.52286, 1.16099, 0.0291232, 0, 0, 0.524901, 0, 0, 0, 0,
+ 1.02116, 0, 1.35762, 0, 0.356909, 0.436415, 0.0355727, 0, 0, 0, 0, 0,
+ 0.262335, 0, 0, 0, 1.33992, 0, 2.9739, 0, 0, 1.31914, 2.66147, 0, 0,
+ 0.942568, 0, 0, 0, 0.025507, 0, 0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
+ 0.8158, 1.21805, 0.586239, 0.25427, 1.04436, 0, 0.630725, 0, 0.133801,
+ 0.210693, 0.363026, 0, 0.533426, 0, 1.25926, 0.722707, 0, 1.22031, 1.30117,
+ 0.495867, 0.222187, 0, 0.72725, 0, 0.767003, 0, 0, 0.147835, 0, 0, 0,
+ 0.608758, 0.469394, 0.00720298, 0.927537, 0, 0.856974, 0.424257, 0, 0,
+ 0.937329, 0, 0, 0, 0.476425, 0, 0.566017, 0.418462, 0.141911, 0.996214,
+ 1.13063, 0, 0.967899, 0, 0, 0, 0.0831304, 0, 0, 1.00378, 0, 0, 0, 1.44818,
+ 1.01768, 0.943891, 0.502745, 0, 0.940135, 0, 0, 0, 0, 0, 0, 2.13243, 0,
+ 0.71208, 0.123918, 1.53907, 1.30225, 1.59644, 0.70222, 0, 0.804329, 0,
+ 0.430576, 0, 0.505872, 0.509603, 0.343448, 0, 0.107756, 0.614544, 1.44549,
+ 1.52311, 0.0454298, 0.300267, 0.562784, 0.395095, 0.228154, 0, 0.675323, 0,
+ 1.70536, 0.766217, 0, 0, 0, 0.735363, 0.0759267, 1.91017, 0.941888, 0, 0, 0,
+ 0, 0, 1.5909, 0, 0, 0, 0, 0.5755, 0, 0.184687, 0, 1.56296, 0.625285, 0, 0,
+ 0, 0, 0, 0.0857888, 0, 0, 0, 0, 0.488383, 0.252786, 0, 0, 0, 1.02817,
+ 1.85665, 0, 0, 0.00981836, 0, 1.06371, 0, 0, 0, 0, 0, 0, 0.290445, 0.316406,
+ 0, 0.304161, 1.25079, 0.0707152, 0, 0.986264, 0.309201, 0, 0, 0, 0, 0,
+ 1.64896, 0.346248, 0, 0.918175, 0.78884, 0.524981, 1.92076, 2.07013,
+ 0.333244, 0.415153, 0.210318, 0, 0, 0, 0, 0, 2.02616, 0, 0.728256, 0.84183,
+ 0.0907453, 0.628881, 3.58099, 1.49974, 0
+] * 2
+
+bw_output_data = [
+ 0.496726, 0, 1.00883, 0, 0.0584256, 0, 0, 0.236412, 0, 0, 0.612267,
+ 0.487726, 0, 0.54883, 1.16099, 0.0291233, 0, 0, 0.428302, 0, 0, 0, 0,
+ 1.13262, 0, 1.64415, 0, 0.311249, 0.570804, 0.259696, 0, 0, 0, 0, 0,
+ 0.262334, 0, 0, 0, 1.23781, 0, 2.86532, 0, 0, 1.34389, 2.76409, 0, 0,
+ 1.03969, 0, 0.00410865, 0, 0.0470295, 0, 0, 0, 0.371556, 0.27175, 1.36614,
+ 1.63956, 0.683887, 1.06176, 0.719552, 0.301314, 0.971195, 0, 0.697143, 0,
+ 0.215219, 0.210693, 0.363027, 0, 0.501283, 0, 1.13399, 0.623774, 0, 1.09851,
+ 1.33313, 0.470441, 0.210965, 0, 0.664178, 0, 0.839686, 0, 0, 0.147834, 0, 0,
+ 0, 0.58786, 0.490128, 0, 0.905806, 0, 0.932134, 0.424257, 0, 0, 0.860629, 0,
+ 0, 0, 0.476425, 0, 0.566017, 0.513721, 0.207341, 1.09508, 1.08385, 0,
+ 0.973787, 0, 0, 0, 0, 0, 0, 1.20698, 0, 0, 0, 1.56135, 1.12369, 0.99588,
+ 0.459803, 0, 0.915854, 0, 0, 0, 0, 0, 0, 2.03206, 0, 0.773264, 0.267228,
+ 1.55012, 1.202, 1.51611, 0.701202, 0, 0.725088, 0, 0.509069, 0, 0.671349,
+ 0.581129, 0.343447, 0, 0.107755, 0.611838, 1.4331, 1.55871, 0.015242,
+ 0.140624, 0.492562, 0.395095, 0.147722, 0, 0.784925, 0, 1.65477, 0.715257,
+ 0, 0, 0, 0.685024, 0, 1.89505, 1.00037, 0, 0, 0, 0, 0, 1.52659, 0, 0, 0, 0,
+ 0.618583, 0, 0.11115, 0, 1.37194, 0.630225, 0, 0, 0, 0, 0, 0.0322124, 0, 0,
+ 0, 0, 0.430834, 0.252786, 0, 0, 0, 0.991297, 1.98451, 0, 0, 0.111511, 0,
+ 1.05513, 0, 0, 0, 0, 0, 0, 0.290445, 0.412559, 0.0429958, 0.256564, 1.27858,
+ 0.289948, 0, 1.01693, 0.327141, 0, 0, 0, 0, 0, 1.83508, 0.346248, 0,
+ 0.961535, 0.790026, 0.552203, 2.13457, 2.19233, 0.333244, 0.316526,
+ 0.179398, 0, 0, 0, 0, 0, 1.86126, 0, 0.728256, 0.750013, 0.011861, 0.576383,
+ 3.38891, 1.29273, 0
+] * 2
+
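+# Expected final hidden states: the last time step of the forward output and
+# the first time step of the backward output, repeated for both batches (the
+# batches are identical because the input data is duplicated).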
+fw_output_hidden_state_data = fw_output_data[-fw_num_units:] * 2
+bw_output_hidden_state_data = bw_output_data[:bw_num_units] * 2
+
+test(
+ name="blackbox",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, input_size)),
+ fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size)),
+ fw_recurrent_weights=Input(
+ "fw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
+ fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
+ fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size)),
+ bw_recurrent_weights=Input(
+ "bw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
+ bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
+ bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
+ fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ activation=1,
+ time_major=0,
+ merge_outputs=0,
+ fw_output=Output(
+ "fw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, fw_num_units)),
+ bw_output=Output(
+ "bw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, bw_num_units)),
+ fw_output_hidden_state=Output(
+ "fw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_output_hidden_state=Output(
+ "bw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ input_data=input_data,
+ fw_weights_data=weights_data,
+ fw_recurrent_weights_data=recurrent_weights_data,
+ fw_bias_data=bias_data,
+ fw_hidden_state_data=[0] * num_batches * fw_num_units,
+ bw_weights_data=weights_data,
+ bw_recurrent_weights_data=recurrent_weights_data,
+ bw_bias_data=bias_data,
+ bw_hidden_state_data=[0] * num_batches * bw_num_units,
+ aux_input_data=[],
+ fw_aux_weights_data=[],
+ bw_aux_weights_data=[],
+ fw_output_data=fw_output_data,
+ bw_output_data=bw_output_data,
+ fw_output_hidden_state_data=fw_output_hidden_state_data,
+ bw_output_hidden_state_data=bw_output_hidden_state_data,
+)
+
+test(
+ name="blackbox_time_major",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(max_time, num_batches, input_size)),
+ fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size)),
+ fw_recurrent_weights=Input(
+ "fw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
+ fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
+ fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size)),
+ bw_recurrent_weights=Input(
+ "bw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
+ bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
+ bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
+ fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ fw_output=Output(
+ "fw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(max_time, num_batches, fw_num_units)),
+ bw_output=Output(
+ "bw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(max_time, num_batches, bw_num_units)),
+ fw_output_hidden_state=Output(
+ "fw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_output_hidden_state=Output(
+ "bw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ activation=1,
+ time_major=1,
+ merge_outputs=0,
+ input_data=convert_to_time_major(input_data,
+ [num_batches, max_time, input_size]),
+ fw_weights_data=weights_data,
+ fw_recurrent_weights_data=recurrent_weights_data,
+ fw_bias_data=bias_data,
+ fw_hidden_state_data=[0] * num_batches * fw_num_units,
+ bw_weights_data=weights_data,
+ bw_recurrent_weights_data=recurrent_weights_data,
+ bw_bias_data=bias_data,
+ bw_hidden_state_data=[0] * num_batches * bw_num_units,
+ aux_input_data=[],
+ fw_aux_weights_data=[],
+ bw_aux_weights_data=[],
+ fw_output_data=convert_to_time_major(fw_output_data,
+ [num_batches, max_time, fw_num_units]),
+ bw_output_data=convert_to_time_major(bw_output_data,
+ [num_batches, max_time, bw_num_units]),
+ fw_output_hidden_state_data=fw_output_hidden_state_data,
+ bw_output_hidden_state_data=bw_output_hidden_state_data,
+)
+
+test(
+ name="blackbox_time_major_merge_outputs",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(max_time, num_batches, input_size)),
+ fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size)),
+ fw_recurrent_weights=Input(
+ "fw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
+ fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
+ fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size)),
+ bw_recurrent_weights=Input(
+ "bw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
+ bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
+ bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
+ fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ activation=1,
+ time_major=1,
+ merge_outputs=1,
+ fw_output=Output(
+ "fw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(max_time, num_batches,
+ fw_num_units + bw_num_units)),
+ bw_output=None,
+ fw_output_hidden_state=Output(
+ "fw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_output_hidden_state=Output(
+ "bw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ input_data=convert_to_time_major(input_data,
+ [num_batches, max_time, input_size]),
+ fw_weights_data=weights_data,
+ fw_recurrent_weights_data=recurrent_weights_data,
+ fw_bias_data=bias_data,
+ fw_hidden_state_data=[0] * num_batches * fw_num_units,
+ bw_weights_data=weights_data,
+ bw_recurrent_weights_data=recurrent_weights_data,
+ bw_bias_data=bias_data,
+ bw_hidden_state_data=[0] * num_batches * bw_num_units,
+ aux_input_data=[],
+ fw_aux_weights_data=[],
+ bw_aux_weights_data=[],
+ fw_output_data=merge_outputs(
+ convert_to_time_major(fw_output_data,
+ [num_batches, max_time, fw_num_units]),
+ [max_time, num_batches, fw_num_units],
+ convert_to_time_major(bw_output_data,
+ [num_batches, max_time, bw_num_units]),
+ [max_time, num_batches, bw_num_units],
+ ),
+ bw_output_data=None,
+ fw_output_hidden_state_data=fw_output_hidden_state_data,
+ bw_output_hidden_state_data=bw_output_hidden_state_data,
+)
+
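+# Same test as blackbox, but with the input sequence reversed along the time
+# axis: the forward and backward results (outputs and final hidden states)
+# swap roles.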
+test(
+ name="blackbox_reversed_inputs",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, input_size)),
+ fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size)),
+ fw_recurrent_weights=Input(
+ "fw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
+ fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
+ fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size)),
+ bw_recurrent_weights=Input(
+ "bw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
+ bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
+ bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
+ fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32", "{0}"),
+ activation=1,
+ time_major=0,
+ merge_outputs=0,
+ fw_output=Output(
+ "fw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, fw_num_units)),
+ bw_output=Output(
+ "bw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, bw_num_units)),
+ fw_output_hidden_state=Output(
+ "fw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_output_hidden_state=Output(
+ "bw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ input_data=reverse_batch_major(input_data,
+ [num_batches, max_time, input_size]),
+ fw_weights_data=weights_data,
+ fw_recurrent_weights_data=recurrent_weights_data,
+ fw_bias_data=bias_data,
+ fw_hidden_state_data=[0] * num_batches * fw_num_units,
+ bw_weights_data=weights_data,
+ bw_recurrent_weights_data=recurrent_weights_data,
+ bw_bias_data=bias_data,
+ bw_hidden_state_data=[0] * num_batches * bw_num_units,
+ aux_input_data=[],
+ fw_aux_weights_data=[],
+ bw_aux_weights_data=[],
+ fw_output_data=reverse_batch_major(bw_output_data,
+ [num_batches, max_time, bw_num_units]),
+ bw_output_data=reverse_batch_major(fw_output_data,
+ [num_batches, max_time, fw_num_units]),
+ fw_output_hidden_state_data=bw_output_hidden_state_data,
+ bw_output_hidden_state_data=fw_output_hidden_state_data,
+)
+
+# Same test as blackbox, but the input is passed to the auxiliary input instead
+# of the regular one. The regular input and its weights are set to zero.
+test(
+ name="blackbox_aux_input",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, input_size)),
+ fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size)),
+ fw_recurrent_weights=Input(
+ "fw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
+ fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
+ fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size)),
+ bw_recurrent_weights=Input(
+ "bw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
+ bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
+ bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ aux_input=Input(
+ "aux_input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, input_size)),
+ fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size)),
+ bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size)),
+ activation=1,
+ time_major=0,
+ merge_outputs=0,
+ fw_output=Output(
+ "fw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, fw_num_units)),
+ bw_output=Output(
+ "bw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, bw_num_units)),
+ fw_output_hidden_state=Output(
+ "fw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_output_hidden_state=Output(
+ "bw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ input_data=[0] * num_batches * max_time * input_size,
+ fw_weights_data=[0] * fw_num_units * input_size,
+ fw_recurrent_weights_data=recurrent_weights_data,
+ fw_bias_data=bias_data,
+ fw_hidden_state_data=[0] * num_batches * fw_num_units,
+ bw_weights_data=[0] * bw_num_units * input_size,
+ bw_recurrent_weights_data=recurrent_weights_data,
+ bw_bias_data=bias_data,
+ bw_hidden_state_data=[0] * num_batches * bw_num_units,
+ aux_input_data=input_data,
+ fw_aux_weights_data=weights_data,
+ bw_aux_weights_data=weights_data,
+ fw_output_data=fw_output_data,
+ bw_output_data=bw_output_data,
+ fw_output_hidden_state_data=fw_output_hidden_state_data,
+ bw_output_hidden_state_data=bw_output_hidden_state_data,
+)
+
+# Same test as blackbox, but the input is split in half and passed to both the
+# regular and the auxiliary input to test their interaction.
+regular_input_data, aux_input_data = split_tensor_in_two(
+ input_data, [num_batches, max_time, input_size])
+regular_fw_weights, aux_fw_weights = split_tensor_in_two(
+ weights_data, [fw_num_units, input_size])
+regular_bw_weights, aux_bw_weights = split_tensor_in_two(
+ weights_data, [bw_num_units, input_size])
+
+test(
+ name="blackbox_regular_and_aux_input",
+ input=Input(
+ "input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, input_size // 2)),
+ fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size // 2)),
+ fw_recurrent_weights=Input(
+ "fw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
+ fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
+ fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size // 2)),
+ bw_recurrent_weights=Input(
+ "bw_recurrent_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
+ bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
+ bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ aux_input=Input(
+ "aux_input", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, input_size // 2)),
+ fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(fw_num_units, input_size // 2)),
+ bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(bw_num_units, input_size // 2)),
+ activation=1,
+ time_major=0,
+ merge_outputs=0,
+ fw_output=Output(
+ "fw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, fw_num_units)),
+ bw_output=Output(
+ "bw_output", "TENSOR_FLOAT32",
+ "{{ {}, {}, {} }}".format(num_batches, max_time, bw_num_units)),
+ fw_output_hidden_state=Output(
+ "fw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, fw_num_units)),
+ bw_output_hidden_state=Output(
+ "bw_output_hidden_state", "TENSOR_FLOAT32",
+ "{{ {}, {} }}".format(num_batches, bw_num_units)),
+ input_data=regular_input_data,
+ fw_weights_data=regular_fw_weights,
+ fw_recurrent_weights_data=recurrent_weights_data,
+ fw_bias_data=bias_data,
+ fw_hidden_state_data=[0] * num_batches * fw_num_units,
+ bw_weights_data=regular_bw_weights,
+ bw_recurrent_weights_data=recurrent_weights_data,
+ bw_bias_data=bias_data,
+ bw_hidden_state_data=[0] * num_batches * bw_num_units,
+ aux_input_data=aux_input_data,
+ fw_aux_weights_data=aux_fw_weights,
+ bw_aux_weights_data=aux_bw_weights,
+ fw_output_data=fw_output_data,
+ bw_output_data=bw_output_data,
+ fw_output_hidden_state_data=fw_output_hidden_state_data,
+ bw_output_hidden_state_data=bw_output_hidden_state_data,
+)
diff --git a/nn/runtime/test/specs/V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.mod.py b/nn/runtime/test/specs/V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.mod.py
new file mode 100644
index 000000000..450671eff
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.mod.py
@@ -0,0 +1,192 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Unidirectional Sequence LSTM Test:
+# 3 Time Step, Layer Normalization, Cifg, Peephole, Projection, and No Clipping.
+import copy
+
+model = Model()
+
+max_time = 3
+n_batch = 2
+n_input = 5
+# n_cell and n_output have the same size only when there is no projection;
+# this test uses a projection layer, so n_output (3) differs from n_cell (4).
+n_cell = 4
+n_output = 3
+
+input = Input("input", "TENSOR_FLOAT32",
+ "{%d, %d, %d}" % (max_time, n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
+ "{0, 0}")
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights",
+ "TENSOR_FLOAT32", "{0, 0}")
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{0}")
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
+ "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float32Scalar("cell_clip_param", 0.)
+proj_clip_param = Float32Scalar("proj_clip_param", 0.)
+time_major_param = BoolScalar("time_major_param", True)
+
+input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
+ "{0}")
+forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+
+output = Output("output", "TENSOR_FLOAT32",
+ "{%d, %d, %d}" % (max_time, n_batch, n_output))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+
+model = model.Operation(
+ "UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights,
+ input_to_forget_weights, input_to_cell_weights, input_to_output_weights,
+ recurrent_to_input_weights, recurrent_to_forget_weights,
+ recurrent_to_cell_weights, recurrent_to_output_weights,
+ cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights,
+ input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias,
+ projection_weights, projection_bias, output_state_in, cell_state_in,
+ activation_param, cell_clip_param, proj_clip_param, time_major_param,
+ input_layer_norm_weights, forget_layer_norm_weights,
+ cell_layer_norm_weights, output_layer_norm_weights).To(
+ [output, output_state_out, cell_state_out])
+
+# Example 1. Input in operand 0.
+input0 = {
+ input_to_input_weights: [],
+ input_to_forget_weights: [
+ -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
+ -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
+ ],
+ input_to_cell_weights: [
+ -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
+ -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
+ ],
+ input_to_output_weights: [
+ -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
+ 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
+ ],
+ input_gate_bias: [],
+ forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
+ cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
+ output_gate_bias: [0.05, -0.01, 0.2, 0.1],
+ recurrent_to_input_weights: [],
+ recurrent_to_cell_weights: [
+ -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
+ ],
+ recurrent_to_forget_weights: [
+ -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
+ ],
+ recurrent_to_output_weights: [
+ 0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
+ ],
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
+ cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
+ projection_weights: [
+ -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
+ ],
+ projection_bias: [],
+ input_layer_norm_weights: [],
+ forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
+ cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
+ output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
+}
+
+test_input = [
+ 0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1, 0.8, 0.1, 0.2, 0.4, 0.5,
+ 0.1, 0.5, 0.2, 0.4, 0.2, 0.2, 0.7, 0.7, 0.1, 0.7, 0.6, 0.9, 0.2, 0.5, 0.7
+]
+
+golden_output = [
+    0.02129706, 0.140816242, 0.0112733059, -0.0226350538, 0.0916948169,
+    0.0769175813, 0.0132302344, 0.152308047, 0.0346313119, -0.0269966982,
+    0.149707705, 0.094149217, -0.0123688057, 0.165790111, 0.0893077999,
+    -0.0103429332, 0.173016444, 0.0720508844
+]
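+
+# max_time * n_batch * n_output = 3 * 2 * 3 = 18 values, one time step after
+# another.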
+
+output0 = {
+ output:
+ golden_output,
+ output_state_out:
+ golden_output[(max_time - 1) * (n_batch * n_output):],
+ cell_state_out: [
+ -0.573662, 0.59525, 0.129295, 0.711027, -0.532303, 0.555613, 0.180099,
+ 0.784506
+ ]
+}
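+
+# The expected output_state_out is simply the last time step of golden_output;
+# cell_state_out holds the corresponding final cell values.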
+
+input0[input] = test_input
+input0[output_state_in] = [0 for _ in range(n_batch * n_output)]
+input0[cell_state_in] = [0 for _ in range(n_batch * n_cell)]
+
+Example((input0, output0)).AddVariations("relaxed", "float16")
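+
+# As elsewhere in these specs, "relaxed" re-runs the example with relaxed
+# float32 precision and "float16" converts all float32 operands to float16.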
diff --git a/nn/runtime/test/specs/V1_3/unidirectional_sequence_rnn.mod.py b/nn/runtime/test/specs/V1_3/unidirectional_sequence_rnn.mod.py
new file mode 100644
index 000000000..75563e12c
--- /dev/null
+++ b/nn/runtime/test/specs/V1_3/unidirectional_sequence_rnn.mod.py
@@ -0,0 +1,229 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+
+
+def test(name, input, weights, recurrent_weights, bias, hidden_state,
+ activation, time_major, output, output_state, input_data, weights_data,
+ recurrent_weights_data, bias_data, hidden_state_data, output_data,
+ output_state_data):
+ activation = Int32Scalar("activation", activation)
+ time_major = Int32Scalar("time_major", time_major)
+ model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
+ recurrent_weights, bias, hidden_state, activation,
+ time_major).To(output, output_state)
+ example = Example(
+ {
+ input: input_data,
+ weights: weights_data,
+ recurrent_weights: recurrent_weights_data,
+ bias: bias_data,
+ hidden_state: hidden_state_data,
+ output: output_data,
+ output_state: output_state_data,
+ },
+ model=model,
+ name=name).AddVariations("relaxed", "float16")
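+
+# test() builds one standalone model plus its example data per call below;
+# AddVariations again derives "relaxed" and "float16" variants of each example.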
+
+
+def convert_to_time_major(tensor, num_batches, max_time, input_size):
+ return np.array(tensor).reshape([num_batches, max_time, input_size
+ ]).transpose([1, 0, 2]).flatten().tolist()
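+
+
+# A worked illustration with made-up values:
+#   convert_to_time_major([1, 2, 3, 4], num_batches=2, max_time=2, input_size=1)
+#   == [1, 3, 2, 4]
+# i.e. the batch and time axes are swapped.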
+
+
+num_batches = 2
+max_time = 16
+input_size = 8
+num_units = 16
+
+input_data = [
+ 0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133, 0.43773448,
+ 0.60379338, 0.35562468, -0.69424844, -0.93421471, -0.87287879, 0.37144363,
+ -0.62476718, 0.23791671, 0.40060222, 0.1356622, -0.99774903, -0.98858172,
+ -0.38952237, -0.47685933, 0.31073618, 0.71511042, -0.63767755, -0.31729108,
+ 0.33468103, 0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
+ -0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007, -0.61777675,
+ -0.21095741, 0.41213346, 0.73784804, 0.094794154, 0.47791874, 0.86496925,
+ -0.53376222, 0.85315156, 0.10288584, 0.86684, -0.011186242, 0.10513687,
+ 0.87825835, 0.59929144, 0.62827742, 0.18899453, 0.31440187, 0.99059987,
+ 0.87170351, -0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
+ 0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567, -0.66609079,
+ 0.59098077, 0.73017097, 0.74604273, 0.32882881, -0.17503482, 0.22396147,
+ 0.19379807, 0.29120302, 0.077113032, -0.70331609, 0.15804303, -0.93407321,
+ 0.40182066, 0.036301374, 0.66521823, 0.0300982, -0.7747041, -0.02038002,
+ 0.020698071, -0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
+ -0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682, 0.43519354,
+ 0.14744234, 0.62589407, 0.1653645, -0.10651493, -0.045277178, 0.99032974,
+ -0.88255352, -0.85147917, 0.28153265, 0.19455957, -0.55479527, -0.56042433,
+ 0.26048636, 0.84702539, 0.47587705, -0.074295521, -0.12287641, 0.70117295,
+ 0.90532446, 0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
+ -0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563, 0.93455386,
+ -0.6324693, -0.083922029
+] * 2
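+
+# The base list is one batch (max_time * input_size = 128 values); both
+# batches receive identical input.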
+weights_data = [
+ 0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346, 0.317493,
+ 0.969689, -0.343251, 0.186423, 0.398151, 0.152399, 0.448504, 0.317662,
+ 0.523556, -0.323514, 0.480877, 0.333113, -0.757714, -0.674487, -0.643585,
+ 0.217766, -0.0251462, 0.79512, -0.595574, -0.422444, 0.371572, -0.452178,
+ -0.556069, -0.482188, -0.685456, -0.727851, 0.841829, 0.551535, -0.232336,
+ 0.729158, -0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
+ 0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183, 0.306261,
+ -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303, 0.0354295, 0.566564,
+ -0.485469, -0.620498, 0.832546, 0.697884, -0.279115, 0.294415, -0.584313,
+ 0.548772, 0.0648819, 0.968726, 0.723834, -0.0080452, -0.350386, -0.272803,
+ 0.115121, -0.412644, -0.824713, -0.992843, -0.592904, -0.417893, 0.863791,
+ -0.423461, -0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
+ 0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042, 0.0960841,
+ 0.368357, 0.244191, -0.817703, -0.211223, 0.442012, 0.37225, -0.623598,
+ -0.405423, 0.455101, 0.673656, -0.145345, -0.511346, -0.901675, -0.81252,
+ -0.127006, 0.809865, -0.721884, 0.636255, 0.868989, -0.347973, -0.10179,
+ -0.777449, 0.917274, 0.819286, 0.206218, -0.00785118, 0.167141, 0.45872,
+ 0.972934, -0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
+ 0.277308, 0.415818
+]
+recurrent_weights_data = [
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1
+]
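+
+# 0.1 times the 16x16 identity: each unit's recurrence sees only its own
+# previous activation.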
+bias_data = [
+ 0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
+ -0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
+ 0.37197268, 0.61957061, 0.3956964, -0.37609905
+]
+
+output_data = [
+ 0.496726, 0, 0.965996, 0, 0.0584254, 0, 0, 0.12315, 0, 0, 0.612266,
+ 0.456601, 0, 0.52286, 1.16099, 0.0291232, 0, 0, 0.524901, 0, 0, 0, 0,
+ 1.02116, 0, 1.35762, 0, 0.356909, 0.436415, 0.0355727, 0, 0, 0, 0, 0,
+ 0.262335, 0, 0, 0, 1.33992, 0, 2.9739, 0, 0, 1.31914, 2.66147, 0, 0,
+ 0.942568, 0, 0, 0, 0.025507, 0, 0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
+ 0.8158, 1.21805, 0.586239, 0.25427, 1.04436, 0, 0.630725, 0, 0.133801,
+ 0.210693, 0.363026, 0, 0.533426, 0, 1.25926, 0.722707, 0, 1.22031, 1.30117,
+ 0.495867, 0.222187, 0, 0.72725, 0, 0.767003, 0, 0, 0.147835, 0, 0, 0,
+ 0.608758, 0.469394, 0.00720298, 0.927537, 0, 0.856974, 0.424257, 0, 0,
+ 0.937329, 0, 0, 0, 0.476425, 0, 0.566017, 0.418462, 0.141911, 0.996214,
+ 1.13063, 0, 0.967899, 0, 0, 0, 0.0831304, 0, 0, 1.00378, 0, 0, 0, 1.44818,
+ 1.01768, 0.943891, 0.502745, 0, 0.940135, 0, 0, 0, 0, 0, 0, 2.13243, 0,
+ 0.71208, 0.123918, 1.53907, 1.30225, 1.59644, 0.70222, 0, 0.804329, 0,
+ 0.430576, 0, 0.505872, 0.509603, 0.343448, 0, 0.107756, 0.614544, 1.44549,
+ 1.52311, 0.0454298, 0.300267, 0.562784, 0.395095, 0.228154, 0, 0.675323, 0,
+ 1.70536, 0.766217, 0, 0, 0, 0.735363, 0.0759267, 1.91017, 0.941888, 0, 0, 0,
+ 0, 0, 1.5909, 0, 0, 0, 0, 0.5755, 0, 0.184687, 0, 1.56296, 0.625285, 0, 0,
+ 0, 0, 0, 0.0857888, 0, 0, 0, 0, 0.488383, 0.252786, 0, 0, 0, 1.02817,
+ 1.85665, 0, 0, 0.00981836, 0, 1.06371, 0, 0, 0, 0, 0, 0, 0.290445, 0.316406,
+ 0, 0.304161, 1.25079, 0.0707152, 0, 0.986264, 0.309201, 0, 0, 0, 0, 0,
+ 1.64896, 0.346248, 0, 0.918175, 0.78884, 0.524981, 1.92076, 2.07013,
+ 0.333244, 0.415153, 0.210318, 0, 0, 0, 0, 0, 2.02616, 0, 0.728256, 0.84183,
+ 0.0907453, 0.628881, 3.58099, 1.49974, 0
+] * 2
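+
+# Expected outputs for one batch (max_time * num_units = 256 values),
+# duplicated because the two input batches are identical.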
+
+output_state_data = [
+    0.415153, 0.210318, 0, 0, 0, 0, 0, 2.02616, 0, 0.728256, 0.84183,
+    0.090745, 0.628881, 3.58099, 1.49974, 0
+] * 2
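+
+# This is the last time step of output_data for each batch (0.090745 is the
+# value listed above as 0.0907453, rounded to lower precision).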
+
+test(
+ name="blackbox_state_output",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{{}, {}, {}}}".format(num_batches, max_time, input_size)),
+ weights=Input("weights", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_units, input_size)),
+ recurrent_weights=Input("recurrent_weights", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_units, num_units)),
+ bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
+ hidden_state=Input("hidden_state", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_batches, num_units)),
+ output=Output("output", "TENSOR_FLOAT32",
+ "{{{}, {}, {}}}".format(num_batches, max_time, num_units)),
+ output_state=Output("output_state", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_batches, num_units)),
+ activation=1,
+ time_major=0,
+ input_data=input_data,
+ weights_data=weights_data,
+ recurrent_weights_data=recurrent_weights_data,
+ bias_data=bias_data,
+ hidden_state_data=[0] * num_batches * num_units,
+ output_data=output_data,
+ output_state_data=output_state_data,
+)
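+
+# activation=1 is fused RELU (hence the non-negative expected outputs) and
+# time_major=0 keeps the batch-major [batch, time, features] layout.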
+
+test(
+ name="blackbox_time_major_state_output",
+ input=Input("input", "TENSOR_FLOAT32",
+ "{{{}, {}, {}}}".format(max_time, num_batches, input_size)),
+ weights=Input("weights", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_units, input_size)),
+ recurrent_weights=Input("recurrent_weights", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_units, num_units)),
+ bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
+ hidden_state=Input("hidden_state", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_batches, num_units)),
+ output=Output("output", "TENSOR_FLOAT32",
+ "{{{}, {}, {}}}".format(max_time, num_batches, num_units)),
+ output_state=Output("output_state", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_batches, num_units)),
+ activation=1,
+ time_major=1,
+ input_data=convert_to_time_major(input_data, num_batches, max_time,
+ input_size),
+ weights_data=weights_data,
+ recurrent_weights_data=recurrent_weights_data,
+ bias_data=bias_data,
+ hidden_state_data=[0] * num_batches * num_units,
+ output_data=convert_to_time_major(output_data, num_batches, max_time,
+ num_units),
+ output_state_data=output_state_data,
+)
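+
+# The time-major variant feeds the same data transposed with
+# convert_to_time_major; the expected final hidden state is unchanged since
+# only the layout differs.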