summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMika Raento <mikie@google.com>2018-04-17 16:35:04 +0000
committerAndroid (Google) Code Review <android-gerrit@google.com>2018-04-17 16:35:04 +0000
commite770a8d43244527917f62c41e8fba5eeb270ca66 (patch)
treec138ead51850cfb71f52aea9e586895a3e4a0b5d
parent1371cc69e9cac15e463b0573ca98e19ea2694ded (diff)
parent6d78cf0e572d4117b3011bcf0ab768c47dc58031 (diff)
downloadml-e770a8d43244527917f62c41e8fba5eeb270ca66.tar.gz
Merge "Clarify Fully Connected Op comments and naming" into pi-dev
-rw-r--r--nn/common/OperationsUtils.cpp13
-rw-r--r--nn/runtime/include/NeuralNetworks.h9
-rw-r--r--nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp15
-rw-r--r--nn/runtime/test/generated/all_generated_tests.cpp14
-rw-r--r--nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp22
-rw-r--r--nn/runtime/test/generated/models/fully_connected_float_4d_simple.model.cpp34
-rw-r--r--nn/runtime/test/generated/vts_models/fully_connected_float_4d_simple.model.cpp82
-rw-r--r--nn/runtime/test/specs/V1_1/fully_connected_float_4d_simple.mod.py43
8 files changed, 223 insertions, 9 deletions
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index 9d9eabfbb..8803d04c9 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -350,14 +350,17 @@ bool fullyConnectedPrepare(const Shape& input,
} else {
NN_OPS_CHECK(input.type == bias.type);
}
+    // The TensorFlow fully connected layer specification says that input should
+    // be of at least rank 2, so we check. TFLite doesn't check.
NN_OPS_CHECK(getNumberOfDimensions(input) >= 2);
- uint32_t input_size = getNumberOfElements(input);
+ NN_OPS_CHECK(getNumberOfDimensions(weights) == 2);
+ uint32_t input_n_elements = getNumberOfElements(input);
uint32_t num_units = getSizeOfDimension(weights, 0);
- uint32_t batch_size = input_size / getSizeOfDimension(weights, 1);
+ uint32_t input_size = getSizeOfDimension(weights, 1);
+ uint32_t batch_size = input_n_elements / input_size;
NN_OPS_CHECK(getSizeOfDimension(bias, 0) == num_units);
- NN_OPS_CHECK(getSizeOfDimension(weights, 1) * batch_size == input_size);
- NN_OPS_CHECK(getNumberOfDimensions(weights) == 2);
+ NN_OPS_CHECK(input_size * batch_size == input_n_elements);
output->type = input.type;
output->dimensions = {batch_size, num_units};
@@ -873,4 +876,4 @@ bool stridedSlicePrepare(const Shape& input,
return true;
}
} // namespace nn
-} // namespace android \ No newline at end of file
+} // namespace android
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 63e2cc70f..235131c6a 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -462,10 +462,11 @@ typedef enum {
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
- * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
- * [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
- * and “input_size” is the size of the input.
+ * * 0: A tensor of at least rank 2, specifying the input. If rank is greater than 2,
+ * then it gets flattened to a 2-D Tensor. The (flattened) 2-D Tensor is reshaped
+ * (if necessary) to [batch_size, input_size], where "input_size" corresponds to
+ * the number of inputs to the layer, matching the second dimension of weights, and
+ * "batch_size" is calculated by dividing the number of elements by "input_size".
* * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
* "num_units" corresponds to the number of output nodes.
* * 2: A 1-D tensor, of shape [num_units], specifying the bias.
diff --git a/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
index 81f9e7831..adbbf847f 100644
--- a/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_1_vts_tests.cpp
@@ -466,6 +466,21 @@ TEST_F(NeuralnetworksHidlTest, floor_relaxed) {
floor_relaxed::examples);
}
+namespace fully_connected_float_4d_simple {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_4d_simple test
+#include "examples/fully_connected_float_4d_simple.example.cpp"
+};
+// Generated model constructor
+#include "vts_models/fully_connected_float_4d_simple.model.cpp"
+} // namespace fully_connected_float_4d_simple
+TEST_F(NeuralnetworksHidlTest, fully_connected_float_4d_simple) {
+ generated_tests::Execute(device,
+ fully_connected_float_4d_simple::createTestModel,
+ fully_connected_float_4d_simple::is_ignored,
+ fully_connected_float_4d_simple::examples);
+}
+
namespace fully_connected_float_relaxed {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_relaxed test
diff --git a/nn/runtime/test/generated/all_generated_tests.cpp b/nn/runtime/test/generated/all_generated_tests.cpp
index 54dc0d44b..240f9193d 100644
--- a/nn/runtime/test/generated/all_generated_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_tests.cpp
@@ -2479,6 +2479,20 @@ TEST_F(GeneratedTests, floor_relaxed) {
floor_relaxed::examples);
}
+namespace fully_connected_float_4d_simple {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_4d_simple test
+#include "generated/examples/fully_connected_float_4d_simple.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_4d_simple.model.cpp"
+} // namespace fully_connected_float_4d_simple
+TEST_F(GeneratedTests, fully_connected_float_4d_simple) {
+ execute(fully_connected_float_4d_simple::CreateModel,
+ fully_connected_float_4d_simple::is_ignored,
+ fully_connected_float_4d_simple::examples);
+}
+
namespace fully_connected_float_relaxed {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_relaxed test
diff --git a/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp
new file mode 100644
index 000000000..4086bc559
--- /dev/null
+++ b/nn/runtime/test/generated/examples/fully_connected_float_4d_simple.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, -8, 9, -10}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {24, 25, 26, 58, 59, 60}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
diff --git a/nn/runtime/test/generated/models/fully_connected_float_4d_simple.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_4d_simple.model.cpp
new file mode 100644
index 000000000..46932f5aa
--- /dev/null
+++ b/nn/runtime/test/generated/models/fully_connected_float_4d_simple.model.cpp
@@ -0,0 +1,34 @@
+// Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type4(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {3, 10});
+ OperandType type2(Type::TENSOR_FLOAT32, {3});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 5, 1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto b0 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type3);
+ auto act = model->addOperand(&type4);
+ // Phase 2, operations
+ static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 30);
+ static float b0_init[] = {1.0f, 2.0f, 3.0f};
+ model->setOperandValue(b0, b0_init, sizeof(float) * 3);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op3});
+ // Phase 4: set relaxed execution
+ model->relaxComputationFloat32toFloat16(true);
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/generated/vts_models/fully_connected_float_4d_simple.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_float_4d_simple.model.cpp
new file mode 100644
index 000000000..fcf290060
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/fully_connected_float_4d_simple.model.cpp
@@ -0,0 +1,82 @@
+// Generated code. Do not edit
+// Create the model
+Model createTestModel() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {4, 1, 5, 1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {3, 10},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 120},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 120, .length = 12},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 132, .length = 4},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::FULLY_CONNECTED,
+ .inputs = {0, 1, 2, 4},
+ .outputs = {3},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {3};
+ std::vector<uint8_t> operandValues = {
+ 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 16, 65, 0, 0, 32, 65, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 16, 65, 0, 0, 32, 65, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 16, 65, 0, 0, 32, 65, 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ .relaxComputationFloat32toFloat16 = true,
+ };
+}
+
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
diff --git a/nn/runtime/test/specs/V1_1/fully_connected_float_4d_simple.mod.py b/nn/runtime/test/specs/V1_1/fully_connected_float_4d_simple.mod.py
new file mode 100644
index 000000000..2338c0bed
--- /dev/null
+++ b/nn/runtime/test/specs/V1_1/fully_connected_float_4d_simple.mod.py
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This test is for testing the input requirements of Fully Connected Op:
+# the input's first dimension doesn't have to be the batch size, the
+# input is reshaped as needed.
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 5, 1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{3, 10}", [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 0
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 1
+    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 2
+])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{3}", [1, 2, 3])
+out0 = Output("op3", "TENSOR_FLOAT32", "{2, 3}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+model = model.RelaxedExecution(True)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
+ 1, 2, 3, 4, 5, 6, 7, -8, 9, -10]}
+output0 = {out0: # output 0
+ [24, 25, 26,
+ 58, 59, 60]}
+
+# Instantiate an example
+Example((input0, output0))