summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSlava Shklyaev <slavash@google.com>2020-05-21 12:59:15 +0100
committerSlava Shklyaev <slavash@google.com>2020-06-10 13:30:59 +0100
commitb7afd5952251561918940586acfae88e6722b6ad (patch)
tree5ade5d608c8c9bdbc6af974bf13d59787a8eee8b
parent3594be18553db032e3af6dae5c6b9e310eb2f3d8 (diff)
downloadml-b7afd5952251561918940586acfae88e6722b6ad.tar.gz
Relax control flow boundary operand dimension constraint
Also updates the NDK spec to mention the constraints and adds some validation tests. Bug: 132458982 Bug: 156918813 Test: NNT_static Change-Id: Ia112e46da065a623a52ac1c402d28dcb963e5580
-rw-r--r--nn/common/Utils.cpp14
-rw-r--r--nn/runtime/include/NeuralNetworks.h31
-rw-r--r--nn/runtime/test/TestValidateOperations.cpp206
-rw-r--r--nn/tools/api/NeuralNetworks.t18
-rw-r--r--nn/tools/api/types.spec13
5 files changed, 248 insertions, 34 deletions
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index 81e5cf1e1..41a217958 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -689,12 +689,18 @@ static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion ha
return ANEURALNETWORKS_NO_ERROR;
}
-// Checks if two operands have the same types, shapes, and parameters.
-// Omits lifetime, numberOfConsumers, and location.
+// Checks if two operands have the same types, ranks (if specified), dimensions
+// (if specified), scales, zeroPoints, and extraParams.
static bool compatible(const Operand& a, const Operand& b) {
NN_RET_CHECK(a.type == b.type) << toString(a.type) << " != " << toString(b.type);
- NN_RET_CHECK(a.dimensions == b.dimensions)
- << toString(a.dimensions) << " != " << toString(b.dimensions);
+ if (a.dimensions.size() != 0 && b.dimensions.size() != 0) {
+ NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions";
+ for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) {
+ if (a.dimensions[i] != 0 && b.dimensions[i] != 0) {
+ NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions";
+ }
+ }
+ }
NN_RET_CHECK_EQ(a.scale, b.scale);
NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
NN_RET_CHECK(a.extraParams == b.extraParams)
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 239cb6185..b4d304f78 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -5441,7 +5441,8 @@ typedef enum {
* The inputs and outputs of the two referenced models must agree with the
* signature of this operation. That is, if the operation has (3 + n) inputs
* and m outputs, both models must have n inputs and m outputs with the same
- * types as the corresponding operation inputs and outputs.
+ * types, ranks (if specified), and dimensions (if specified) as the
+ * corresponding operation inputs and outputs.
*
* Inputs:
* * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]
@@ -5510,13 +5511,13 @@ typedef enum {
* Inputs:
* * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition
* model. The model must have (m + k + n) inputs with
- * the same types as the corresponding inputs of the WHILE operation
- * and exactly one output of {@link ANEURALNETWORKS_TENSOR_BOOL8}
- * and shape [1].
+ * the same types, ranks (if specified), and dimensions (if specified)
+ * as the corresponding inputs of the WHILE operation and exactly one
+ * output of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1].
* * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model.
* The model must have (m + k + n) inputs and (m + k) outputs with
- * the same types as the corresponding inputs and outputs of the WHILE
- * operation.
+ * the same types, ranks (if specified), and dimensions (if specified)
+ * as the corresponding inputs and outputs of the WHILE operation.
* * (m inputs): Initial values for input-output operands.
* * (k inputs): Initial values for state-only operands.
* * (n inputs): Values for input-only operands.
@@ -6162,7 +6163,9 @@ typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
*
* If a tensor operand's type is not fully specified, the dimensions
* of the operand are deduced from the operand types and values of the
- * operation for which that operand is an output.
+ * operation for which that operand is an output or from the corresponding
+ * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
+ * operand type in the case of referenced model input operands.
*
* <p>In the following situations, a tensor operand type must be fully
* specified:<ul>
@@ -6171,10 +6174,10 @@ typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
* non-nullptr buffer) or
* {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
* <li>The operand is a model input (see
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}). A
- * fully specified tensor operand type must either be provided
- * to {@link ANeuralNetworksModel_addOperand}; or it must be
- * provided to the corresponding
+ * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
+ * model within a compilation. A fully specified tensor operand type
+ * must either be provided to {@link ANeuralNetworksModel_addOperand};
+ * or it must be provided to the corresponding
* {@link ANeuralNetworksExecution_setInput}, or
* {@link ANeuralNetworksExecution_setInputFromMemory}.
* EXCEPTION: If the input is optional and omitted
@@ -6182,9 +6185,9 @@ typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
* {@link ANeuralNetworksExecution_setInput}) then it need
* not have a fully specified tensor operand type.</li>
* <li>The operand is a model output (see
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs})
- * and is to be used with
- * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
+ * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
+ * model within a compilation and is to be used with {@link
+ * ANeuralNetworksExecution_startComputeWithDependencies}.
* A fully specified tensor operand type must either be provided
* to {@link ANeuralNetworksModel_addOperand}; or it must be
* provided to the corresponding
diff --git a/nn/runtime/test/TestValidateOperations.cpp b/nn/runtime/test/TestValidateOperations.cpp
index 01e4338d7..f8d376e3a 100644
--- a/nn/runtime/test/TestValidateOperations.cpp
+++ b/nn/runtime/test/TestValidateOperations.cpp
@@ -54,7 +54,7 @@ static const int32_t kAvailableOperandCodes[] = {ANEURALNETWORKS_FLOAT32,
ANEURALNETWORKS_TENSOR_OEM_BYTE};
ANeuralNetworksOperandType getOpType(int32_t opcode, uint32_t dimCount = 0,
- uint32_t* dim = nullptr) {
+ const uint32_t* dim = nullptr) {
ANeuralNetworksOperandType opType = {.type = opcode,
.dimensionCount = dimCount,
.dimensions = dim,
@@ -72,10 +72,11 @@ ANeuralNetworksOperandType getOpType(int32_t opcode, uint32_t dimCount = 0,
struct OperandTypeWithExtraParams {
OperandTypeWithExtraParams(const ANeuralNetworksOperandType& operandType)
- : operandType(operandType), channelQuant(std::nullopt) {}
+ : operandType(operandType), channelQuant(std::nullopt), valueModel(std::nullopt) {}
ANeuralNetworksOperandType operandType;
std::optional<ANeuralNetworksSymmPerChannelQuantParams> channelQuant;
+ std::optional<const ANeuralNetworksModel*> valueModel;
bool operator==(const OperandTypeWithExtraParams& that) const {
if (operandType.type != that.operandType.type ||
@@ -92,6 +93,10 @@ struct OperandTypeWithExtraParams {
return false;
}
+ if (valueModel != that.valueModel) {
+ return false;
+ }
+
if (operandType.dimensions) {
if (!that.operandType.dimensions) {
return false;
@@ -346,6 +351,10 @@ class OperationTestBase {
mValidOutputs[index].channelQuant = channelQuant;
}
+ void setInputOperandValueFromModel(int32_t index, const ANeuralNetworksModel* valueModel) {
+ mValidInputs[index].valueModel = valueModel;
+ }
+
// Add each operand separately and add the operation using these operands.
// This function does not cover the cases that an operand is used multiple times.
int32_t addOperation(const std::vector<OperandTypeWithExtraParams>& inputs,
@@ -362,6 +371,10 @@ class OperationTestBase {
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
model, opIdx, &inputs[i].channelQuant.value());
}
+ if (inputs[i].valueModel) {
+ ANeuralNetworksModel_setOperandValueFromModel(model, opIdx,
+ inputs[i].valueModel.value());
+ }
inputIds.push_back(opIdx++);
}
for (uint32_t i = 0; i < outputs.size(); i++) {
@@ -663,6 +676,12 @@ std::ostream& operator<<(std::ostream& os, const OperandTypeWithExtraParams& ope
} else {
os << ", channelQuant: nullopt";
}
+
+ if (operand.valueModel.has_value()) {
+ os << ", valueModel: " << operand.valueModel.value();
+ } else {
+ os << ", valueModel: nullopt";
+ }
os << "}";
return os;
}
@@ -4443,4 +4462,187 @@ TEST(OperationValidationTest, RANK_quant8_signed) {
rankTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}
+ANeuralNetworksModel* makeIdentityModel(const ANeuralNetworksOperandType* type) {
+ ANeuralNetworksModel* model = nullptr;
+ EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
+ uint32_t inputs[] = {0};
+ uint32_t outputs[] = {1};
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST, std::size(inputs),
+ inputs, std::size(outputs), outputs),
+ ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inputs), inputs,
+ std::size(outputs), outputs),
+ ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
+ return model;
+}
+
+void testIf(const std::vector<uint32_t>& outerDims, const ANeuralNetworksModel* thenModel,
+ const ANeuralNetworksModel* elseModel, bool testMutations) {
+ const uint32_t kThenOperand = 1;
+ const uint32_t kElseOperand = 2;
+ const uint32_t boolDims[] = {1};
+ ANeuralNetworksOperandType boolType =
+ getOpType(ANEURALNETWORKS_TENSOR_BOOL8, std::size(boolDims), boolDims);
+ ANeuralNetworksOperandType dataType =
+ getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, outerDims.size(), outerDims.data());
+ ANeuralNetworksOperandType modelType = getOpType(ANEURALNETWORKS_MODEL);
+ OperationTestBase test(ANEURALNETWORKS_IF, {boolType, modelType, modelType, dataType},
+ {dataType});
+ test.setInputOperandValueFromModel(kThenOperand, thenModel);
+ test.setInputOperandValueFromModel(kElseOperand, elseModel);
+ if (testMutations) {
+ test.testOpsValidations();
+ } else {
+ EXPECT_TRUE(test.testSuccess());
+ }
+}
+
+void testIf(const std::vector<uint32_t>& outerDims, const std::vector<uint32_t>& thenDims,
+ const std::vector<uint32_t>& elseDims, bool testMutations) {
+ ANeuralNetworksOperandType thenDataType =
+ getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, thenDims.size(), thenDims.data());
+ ANeuralNetworksOperandType elseDataType =
+ getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, elseDims.size(), elseDims.data());
+ ANeuralNetworksModel* thenModel = makeIdentityModel(&thenDataType);
+ ANeuralNetworksModel* elseModel = makeIdentityModel(&elseDataType);
+ testIf(outerDims, thenModel, elseModel, testMutations);
+ ANeuralNetworksModel_free(thenModel);
+ ANeuralNetworksModel_free(elseModel);
+}
+
+TEST(OperationValidationTest, IF) {
+ const std::vector<std::pair<std::string, std::vector<uint32_t>>> configurations = {
+ {"fully specified", {1, 2, 3}},
+ {"unknown dimensions", {0, 2, 0}},
+ {"unknown rank", {}},
+ };
+ // We skip mutation testing for all but the first configuration to avoid the
+ // exponential runtime blowup. The value of additional operand code and
+ // count mutations is negligible because whether the shapes are fully
+ // specified should have nothing to do with the operand code or count.
+ bool testMutations = true;
+ for (const auto& [outerTrace, outerDims] : configurations) {
+ SCOPED_TRACE(testing::Message() << "outerDims: " << outerTrace);
+ for (const auto& [thenTrace, thenDims] : configurations) {
+ SCOPED_TRACE(testing::Message() << "thenDims: " << thenTrace);
+ for (const auto& [elseTrace, elseDims] : configurations) {
+ SCOPED_TRACE(testing::Message() << "elseDims: " << elseTrace);
+ testIf(outerDims, thenDims, elseDims, testMutations);
+ testMutations = false;
+ }
+ }
+ }
+}
+
+// operand 0 --> +------+
+// | LESS | --> operand 2
+// operand 1 --> +------+
+//
+ANeuralNetworksModel* makeWhileCondModel(const ANeuralNetworksOperandType* dataType,
+ const ANeuralNetworksOperandType* boolType) {
+ ANeuralNetworksModel* model = nullptr;
+ EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, dataType), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, dataType), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, boolType), ANEURALNETWORKS_NO_ERROR);
+ const uint32_t inputs[] = {0, 1};
+ const uint32_t outputs[] = {2};
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_LESS, std::size(inputs),
+ inputs, std::size(outputs), outputs),
+ ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inputs), inputs,
+ std::size(outputs), outputs),
+ ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
+ return model;
+}
+
+// +------+
+// operand 0 --> | CAST | --> operand 2
+// +------+
+//
+// operand 1 --> (unused)
+//
+ANeuralNetworksModel* makeWhileBodyModel(const ANeuralNetworksOperandType* type) {
+ ANeuralNetworksModel* model = nullptr;
+ EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
+ const uint32_t castInputs[] = {0};
+ const uint32_t castOutputs[] = {2};
+ EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST, std::size(castInputs),
+ castInputs, std::size(castOutputs), castOutputs),
+ ANEURALNETWORKS_NO_ERROR);
+ const uint32_t modelInputs[] = {0, 1};
+ const uint32_t modelOutputs[] = {2};
+ EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(modelInputs),
+ modelInputs, std::size(modelOutputs),
+ modelOutputs),
+ ANEURALNETWORKS_NO_ERROR);
+ EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
+ return model;
+}
+
+void testWhile(const std::vector<uint32_t>& outerDims, const ANeuralNetworksModel* condModel,
+ const ANeuralNetworksModel* bodyModel, bool testMutations) {
+ const uint32_t kCondOperand = 0;
+ const uint32_t kBodyOperand = 1;
+ ANeuralNetworksOperandType modelType = getOpType(ANEURALNETWORKS_MODEL);
+ ANeuralNetworksOperandType dataType =
+ getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, outerDims.size(), outerDims.data());
+ OperationTestBase test(ANEURALNETWORKS_WHILE, {modelType, modelType, dataType, dataType},
+ {dataType});
+ test.setInputOperandValueFromModel(kCondOperand, condModel);
+ test.setInputOperandValueFromModel(kBodyOperand, bodyModel);
+ if (testMutations) {
+ test.testOpsValidations();
+ } else {
+ EXPECT_TRUE(test.testSuccess());
+ }
+}
+
+void testWhile(const std::vector<uint32_t>& outerDims, const std::vector<uint32_t>& condDims,
+ const std::vector<uint32_t>& bodyDims, bool testMutations) {
+ const uint32_t boolDims[] = {1};
+ ANeuralNetworksOperandType boolType =
+ getOpType(ANEURALNETWORKS_TENSOR_BOOL8, std::size(boolDims), boolDims);
+ ANeuralNetworksOperandType condDataType =
+ getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, condDims.size(), condDims.data());
+ ANeuralNetworksOperandType bodyDataType =
+ getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, bodyDims.size(), bodyDims.data());
+ ANeuralNetworksModel* condModel = makeWhileCondModel(&condDataType, &boolType);
+ ANeuralNetworksModel* bodyModel = makeWhileBodyModel(&bodyDataType);
+ testWhile(outerDims, condModel, bodyModel, testMutations);
+ ANeuralNetworksModel_free(condModel);
+ ANeuralNetworksModel_free(bodyModel);
+}
+
+TEST(OperationValidationTest, WHILE) {
+ const std::vector<std::pair<std::string, std::vector<uint32_t>>> configurations = {
+ {"fully specified", {1, 2, 3}},
+ {"unknown dimensions", {0, 2, 0}},
+ {"unknown rank", {}},
+ };
+ // We skip mutation testing for all but the first configuration to avoid the
+ // exponential runtime blowup. The value of additional operand code and
+ // count mutations is negligible because whether the shapes are fully
+ // specified should have nothing to do with the operand code or count.
+ bool testMutations = true;
+ for (const auto& [outerTrace, outerDims] : configurations) {
+ SCOPED_TRACE(testing::Message() << "outerDims: " << outerTrace);
+ for (const auto& [condTrace, condDims] : configurations) {
+ SCOPED_TRACE(testing::Message() << "condDims: " << condTrace);
+ for (const auto& [bodyTrace, bodyDims] : configurations) {
+ SCOPED_TRACE(testing::Message() << "bodyDims: " << bodyTrace);
+ testWhile(outerDims, condDims, bodyDims, testMutations);
+ testMutations = false;
+ }
+ }
+ }
+}
+
} // end namespace
diff --git a/nn/tools/api/NeuralNetworks.t b/nn/tools/api/NeuralNetworks.t
index c4ce06fdf..a6ab569cd 100644
--- a/nn/tools/api/NeuralNetworks.t
+++ b/nn/tools/api/NeuralNetworks.t
@@ -594,7 +594,9 @@ typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
*
* If a tensor operand's type is not fully specified, the dimensions
* of the operand are deduced from the operand types and values of the
- * operation for which that operand is an output.
+ * operation for which that operand is an output or from the corresponding
+ * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
+ * operand type in the case of referenced model input operands.
*
* <p>In the following situations, a tensor operand type must be fully
* specified:<ul>
@@ -603,10 +605,10 @@ typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
* non-nullptr buffer) or
* {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
* <li>The operand is a model input (see
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}). A
- * fully specified tensor operand type must either be provided
- * to {@link ANeuralNetworksModel_addOperand}; or it must be
- * provided to the corresponding
+ * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
+ * model within a compilation. A fully specified tensor operand type
+ * must either be provided to {@link ANeuralNetworksModel_addOperand};
+ * or it must be provided to the corresponding
* {@link ANeuralNetworksExecution_setInput}, or
* {@link ANeuralNetworksExecution_setInputFromMemory}.
* EXCEPTION: If the input is optional and omitted
@@ -614,9 +616,9 @@ typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
* {@link ANeuralNetworksExecution_setInput}) then it need
* not have a fully specified tensor operand type.</li>
* <li>The operand is a model output (see
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs})
- * and is to be used with
- * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
+ * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
+ * model within a compilation and is to be used with {@link
+ * ANeuralNetworksExecution_startComputeWithDependencies}.
* A fully specified tensor operand type must either be provided
* to {@link ANeuralNetworksModel_addOperand}; or it must be
* provided to the corresponding
diff --git a/nn/tools/api/types.spec b/nn/tools/api/types.spec
index bb91b9e43..5ef9f68d4 100644
--- a/nn/tools/api/types.spec
+++ b/nn/tools/api/types.spec
@@ -6176,7 +6176,8 @@
* The inputs and outputs of the two referenced %{model_or_subgraph}s must agree with the
* signature of this operation. That is, if the operation has (3 + n) inputs
* and m outputs, both %{model_or_subgraph}s must have n inputs and m outputs with the same
- * types as the corresponding operation inputs and outputs.
+ * types, ranks (if specified), and dimensions (if specified) as the
+ * corresponding operation inputs and outputs.
*
* Inputs:
* * 0: A value of type {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} and shape [1]
@@ -6246,13 +6247,13 @@
* Inputs:
* * 0: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the condition
* %{model_or_subgraph}. The %{model_or_subgraph} must have (m + k + n) inputs with
- * the same types as the corresponding inputs of the WHILE operation
- * and exactly one output of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}
- * and shape [1].
+ * the same types, ranks (if specified), and dimensions (if specified)
+ * as the corresponding inputs of the WHILE operation and exactly one
+ * output of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} and shape [1].
* * 1: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the body %{model_or_subgraph}.
* The %{model_or_subgraph} must have (m + k + n) inputs and (m + k) outputs with
- * the same types as the corresponding inputs and outputs of the WHILE
- * operation.
+ * the same types, ranks (if specified), and dimensions (if specified)
+ * as the corresponding inputs and outputs of the WHILE operation.
* * (m inputs): Initial values for input-output operands.
* * (k inputs): Initial values for state-only operands.
* * (n inputs): Values for input-only operands.