summaryrefslogtreecommitdiff
path: root/nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py
diff options
context:
space:
mode:
authorXusong Wang <xusongw@google.com>2020-06-02 14:47:55 -0700
committerXusong Wang <xusongw@google.com>2020-06-02 14:47:55 -0700
commit54688b80e26325de12e1dd5112a31343f94d68ae (patch)
tree02341f58ada78cddcff535c7c609f78bc81d57b7 /nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py
parentc7e4e604b2d4f1cf8f2bdbe11350a48c719148d3 (diff)
downloadml-54688b80e26325de12e1dd5112a31343f94d68ae.tar.gz
Relaxed LSH_PROJECTION: do not convert the hash tensor as internal
LSH_PROJECTION is very sensitive to the value of the hash tensor. Prior to this CL, AllInputsAsInternalConverter will convert the hash tensor to internal by introducing a dummy ADD operation. Under relaxed execution mode, the small precision loss in ADD will result in a significant difference in the final result. This CL prevents the hash tensor from being converted to internal in relaxed precision tests. Additionally, this CL removes a redundant variation in lsh_projection_float16. Fixes: 155962587 Test: NNT_static Change-Id: Id5522b4949a4e3ab4801537e8eb747a25f0cd0e8
Diffstat (limited to 'nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py')
-rw-r--r--nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py5
1 file changed, 3 insertions, 2 deletions
diff --git a/nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py b/nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py
index 2b3b33a1e..a8af8940d 100644
--- a/nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py
+++ b/nn/runtime/test/specs/V1_2/lsh_projection_4_relaxed.mod.py
@@ -20,8 +20,9 @@ num_bits = 2
model = Model()
-hhash = Parameter("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
- [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+hhash = Parameter(
+ "hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765]).ShouldNeverBeInternal()
lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
type_param = Int32Scalar("type_param", 1) # SPARSE DEPRECATED