author     Michael Butler <butlermichael@google.com>  2020-05-27 23:06:31 -0700
committer  Michael Butler <butlermichael@google.com>  2020-06-09 02:46:13 +0000
commit     dde5b4dd1fbb4c75ee6a962dffd3fa996d95958b (patch)
tree       7d407d20825e1c9600424d21544b47b62470edf5
parent     66e5923200afc965bf19b880737e9180e9f5c909 (diff)
download   ml-dde5b4dd1fbb4c75ee6a962dffd3fa996d95958b.tar.gz
Verify non-optional tensors have values in CpuExecutor
This change adds additional validation for non-optional tensors for the
following operations:
* EMBEDDING_LOOKUP
* HASHTABLE_LOOKUP
* LSH_PROJECTION
* BIDIRECTIONAL_SEQUENCE_LSTM
* LSTM
* RANDOM_MULTINOMIAL
* RNN
* SVDF
* SPLIT

Some operations such as SVDF unpack the scalar values without checking
if the value is present, leading to a failed CHECK. This CL adds
protections to use default values in these cases, and relies on a
corresponding Prepare method to cause these cases to fail validation.

Bug: 157516274
Test: mma
Test: CtsNNAPITestCases
Test: NeuralNetworksTest_static
Test: libneuralnetworks_fuzzer
Change-Id: I6bb804ec40205c9741b04231022894c714ad28ec
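A minimal, self-contained sketch of the failure mode and the fix follows. The toy
RunTimeOperandInfo, the memcpy-based getScalarData stand-in, and main() are
illustrative assumptions; only getScalarDataWithDefault mirrors the helper this
change adds to CpuExecutor.h:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Reduced stand-in for the real RunTimeOperandInfo: an omitted optional
    // operand carries no buffer and zero length.
    struct RunTimeOperandInfo {
        uint8_t* buffer = nullptr;
        uint32_t length = 0;
    };

    // Stand-in for getScalarData: reads the buffer unconditionally, so an
    // omitted operand means reading through a null pointer.
    template <typename T>
    T getScalarData(const RunTimeOperandInfo& info) {
        T data;
        std::memcpy(&data, info.buffer, sizeof(T));
        return data;
    }

    // Mirrors the new helper: falls back to a default when the buffer is too
    // small, deferring the actual rejection to the operation's Prepare method.
    template <typename T>
    T getScalarDataWithDefault(const RunTimeOperandInfo& info, T defaultValue) {
        if (info.length < sizeof(T)) {
            return defaultValue;
        }
        return getScalarData<T>(info);
    }

    int main() {
        RunTimeOperandInfo omitted;  // no value provided by the caller
        // getScalarData<int32_t>(omitted) would dereference nullptr here;
        // the defaulted accessor stays safe and Prepare() reports the error.
        std::cout << getScalarDataWithDefault<int32_t>(omitted, 0) << "\n";
        return 0;
    }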
-rw-r--r--  nn/common/CpuExecutor.cpp                           17
-rw-r--r--  nn/common/include/CpuExecutor.h                      8
-rw-r--r--  nn/common/operations/BidirectionalSequenceLSTM.cpp  78
-rw-r--r--  nn/common/operations/LSHProjection.cpp              13
-rw-r--r--  nn/common/operations/LSTM.cpp                       54
-rw-r--r--  nn/common/operations/RNN.cpp                         2
-rw-r--r--  nn/common/operations/RNN.h                           2
-rw-r--r--  nn/common/operations/SVDF.cpp                       22
8 files changed, 167 insertions, 29 deletions
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 2673f2de6..d8582ed58 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -991,6 +991,9 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
}
} break;
case OperationType::EMBEDDING_LOOKUP: {
+ if (!allParametersPresent(2, 1)) {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
const RunTimeOperandInfo& values = operands[ins[EmbeddingLookup::kValueTensor]];
const RunTimeOperandInfo& lookups = operands[ins[EmbeddingLookup::kLookupTensor]];
RunTimeOperandInfo& output = operands[outs[EmbeddingLookup::kOutputTensor]];
@@ -1002,6 +1005,9 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
setInfoAndAllocateIfNeeded(&output, outputShape, &result) && lookup.Eval();
} break;
case OperationType::HASHTABLE_LOOKUP: {
+ if (!allParametersPresent(3, 2)) {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
const RunTimeOperandInfo& lookups = operands[ins[HashtableLookup::kLookupTensor]];
const RunTimeOperandInfo& keys = operands[ins[HashtableLookup::kKeyTensor]];
const RunTimeOperandInfo& values = operands[ins[HashtableLookup::kValueTensor]];
@@ -1102,6 +1108,9 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
setInfoAndAllocateIfNeeded(&output, outputShape, &result) && lstm_cell.Eval();
} break;
case OperationType::RANDOM_MULTINOMIAL: {
+ if (!allParametersPresent(3, 1)) {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
const RunTimeOperandInfo& lookups = operands[ins[HashtableLookup::kLookupTensor]];
const RunTimeOperandInfo& keys = operands[ins[HashtableLookup::kKeyTensor]];
const RunTimeOperandInfo& values = operands[ins[HashtableLookup::kValueTensor]];
@@ -1115,6 +1124,10 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
multinomial.Eval();
} break;
case OperationType::RNN: {
+ if (!allParametersPresent(6, 2)) {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
RunTimeOperandInfo& hiddenStateOut = operands[outs[RNN::kHiddenStateOutTensor]];
RunTimeOperandInfo& output = operands[outs[RNN::kOutputTensor]];
@@ -1409,8 +1422,8 @@ int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo
expand_dims::eval(input.buffer, input.shape(), axis, output.buffer, outShape);
} break;
case OperationType::SPLIT: {
- if (ins.size() != 3) {
- LOG(ERROR) << "Wrong input count";
+ const size_t outCount = outs.size();
+ if (!allParametersPresent(3, outCount)) {
return ANEURALNETWORKS_BAD_DATA;
}
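For context, here is a self-contained sketch of what a check like
allParametersPresent(3, outCount) plausibly does. The helper is internal to
executeOperation, so the names, types, and structure below are assumptions for
illustration, not the actual implementation:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class OperandLifeTime { CONSTANT_VALUE, NO_VALUE };

    struct RunTimeOperandInfo {
        OperandLifeTime lifetime = OperandLifeTime::CONSTANT_VALUE;
    };

    struct Operation {
        std::vector<uint32_t> inputs;
        std::vector<uint32_t> outputs;
    };

    // Plausible behavior: the right operand counts, and no input or output
    // marked NO_VALUE (i.e. omitted by the caller).
    bool allParametersPresent(const Operation& operation,
                              const std::vector<RunTimeOperandInfo>& operands,
                              size_t requiredIns, size_t requiredOuts) {
        if (operation.inputs.size() != requiredIns ||
            operation.outputs.size() != requiredOuts) {
            return false;
        }
        auto hasValue = [&](uint32_t index) {
            return operands[index].lifetime != OperandLifeTime::NO_VALUE;
        };
        for (uint32_t i : operation.inputs) {
            if (!hasValue(i)) return false;
        }
        for (uint32_t i : operation.outputs) {
            if (!hasValue(i)) return false;
        }
        return true;
    }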
diff --git a/nn/common/include/CpuExecutor.h b/nn/common/include/CpuExecutor.h
index a6bf74c95..edb233217 100644
--- a/nn/common/include/CpuExecutor.h
+++ b/nn/common/include/CpuExecutor.h
@@ -250,6 +250,14 @@ T getScalarData(const RunTimeOperandInfo& info) {
return data[0];
}
+template <typename T>
+T getScalarDataWithDefault(const RunTimeOperandInfo& info, T defaultValue) {
+ if (info.length < sizeof(T)) {
+ return defaultValue;
+ }
+ return getScalarData<T>(info);
+}
+
inline bool IsNullInput(const RunTimeOperandInfo* input) {
return input->lifetime == hal::OperandLifeTime::NO_VALUE;
}
diff --git a/nn/common/operations/BidirectionalSequenceLSTM.cpp b/nn/common/operations/BidirectionalSequenceLSTM.cpp
index d4d32b964..12ac43f20 100644
--- a/nn/common/operations/BidirectionalSequenceLSTM.cpp
+++ b/nn/common/operations/BidirectionalSequenceLSTM.cpp
@@ -169,19 +169,24 @@ BidirectionalSequenceLSTM::BidirectionalSequenceLSTM(const Operation& operation,
bw_cell_layer_norm_weights_ = GetInput(operation, operands, kBwCellLayerNormWeightsTensor);
bw_output_layer_norm_weights_ = GetInput(operation, operands, kBwOutputLayerNormWeightsTensor);
- params_.activation = static_cast<TfLiteFusedActivation>(
- getScalarData<int32_t>(*GetInput(operation, operands, kActivationParam)));
+ const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+ params_.activation = static_cast<TfLiteFusedActivation>(getScalarDataWithDefault<int32_t>(
+ activationOperand, TfLiteFusedActivation::kTfLiteActNone));
+ const auto& clipOperand = *GetInput(operation, operands, kCellClipParam);
+ const auto& projOperand = *GetInput(operation, operands, kProjClipParam);
if (input_->type == OperandType::TENSOR_FLOAT32) {
- params_.cell_clip = getScalarData<float>(*GetInput(operation, operands, kCellClipParam));
- params_.proj_clip = getScalarData<float>(*GetInput(operation, operands, kProjClipParam));
+ params_.cell_clip = getScalarDataWithDefault<float>(clipOperand, 0.0f);
+ params_.proj_clip = getScalarDataWithDefault<float>(projOperand, 0.0f);
} else {
- params_.cell_clip = static_cast<float>(
- getScalarData<_Float16>(*GetInput(operation, operands, kCellClipParam)));
- params_.proj_clip = static_cast<float>(
- getScalarData<_Float16>(*GetInput(operation, operands, kProjClipParam)));
+ params_.cell_clip =
+ static_cast<float>(getScalarDataWithDefault<_Float16>(clipOperand, 0.0f));
+ params_.proj_clip =
+ static_cast<float>(getScalarDataWithDefault<_Float16>(projOperand, 0.0f));
}
- params_.merge_outputs = getScalarData<bool>(*GetInput(operation, operands, kMergeOutputsParam));
- params_.time_major = getScalarData<bool>(*GetInput(operation, operands, kTimeMajorParam));
+ const auto& mergeOutputsOperand = *GetInput(operation, operands, kMergeOutputsParam);
+ params_.merge_outputs = getScalarDataWithDefault<bool>(mergeOutputsOperand, false);
+ const auto& timeMajorOperand = *GetInput(operation, operands, kTimeMajorParam);
+ params_.time_major = getScalarDataWithDefault<bool>(timeMajorOperand, false);
params_.use_layer_norm = !IsNullInput(fw_input_layer_norm_weights_);
fw_output_ = GetOutput(operation, operands, kFwOutputTensor);
@@ -205,6 +210,59 @@ bool BidirectionalSequenceLSTM::Prepare(const Operation& operation, RunTimeOpera
Shape* fwOutputShape, Shape* bwOutputShape,
Shape* fwOutputActivationState, Shape* fwOutputCellState,
Shape* bwOutputActivationState, Shape* bwOutputCellState) {
+ // Check we have all the inputs and outputs we need.
+ constexpr int requiredInputs[] = {
+ kInputTensor,
+ kFwInputToForgetWeightsTensor,
+ kFwInputToCellWeightsTensor,
+ kFwInputToOutputWeightsTensor,
+ kFwRecurrentToForgetWeightsTensor,
+ kFwRecurrentToCellWeightsTensor,
+ kFwRecurrentToOutputWeightsTensor,
+ kFwForgetGateBiasTensor,
+ kFwCellGateBiasTensor,
+ kFwOutputGateBiasTensor,
+ kBwInputToForgetWeightsTensor,
+ kBwInputToCellWeightsTensor,
+ kBwInputToOutputWeightsTensor,
+ kBwRecurrentToForgetWeightsTensor,
+ kBwRecurrentToCellWeightsTensor,
+ kBwRecurrentToOutputWeightsTensor,
+ kBwForgetGateBiasTensor,
+ kBwCellGateBiasTensor,
+ kBwOutputGateBiasTensor,
+ kFwInputActivationStateTensor,
+ kFwInputCellStateTensor,
+ kBwInputActivationStateTensor,
+ kBwInputCellStateTensor,
+ kActivationParam,
+ kCellClipParam,
+ kProjClipParam,
+ kMergeOutputsParam,
+ kTimeMajorParam,
+ };
+ for (const int requiredInput : requiredInputs) {
+ NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+ << "required input " << requiredInput << " is omitted";
+ }
+
+ // Check that the scalar operands' buffers are large enough.
+ const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+ NN_RET_CHECK(activationOperand.length >= sizeof(int32_t));
+ const auto& cellOperand = *GetInput(operation, operands, kCellClipParam);
+ const auto& projOperand = *GetInput(operation, operands, kProjClipParam);
+ if (input_->type == OperandType::TENSOR_FLOAT32) {
+ NN_RET_CHECK(cellOperand.length >= sizeof(float));
+ NN_RET_CHECK(projOperand.length >= sizeof(float));
+ } else {
+ NN_RET_CHECK(cellOperand.length >= sizeof(_Float16));
+ NN_RET_CHECK(projOperand.length >= sizeof(_Float16));
+ }
+ const auto& mergeOutputsOperand = *GetInput(operation, operands, kMergeOutputsParam);
+ NN_RET_CHECK(mergeOutputsOperand.length >= sizeof(bool));
+ const auto& timeMajorOperand = *GetInput(operation, operands, kTimeMajorParam);
+ NN_RET_CHECK(timeMajorOperand.length >= sizeof(bool));
+
// Inferring batch size, number of outputs and number of cells from the
// input tensors.
NN_CHECK(NumDimensions(input_) == 3);
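The NN_RET_CHECK lines added above both log a message and make the surrounding
bool-returning Prepare fail. A sketch of the general idiom behind such macros,
assuming a "falsey stream" design; the real macro lives in the NN utility
headers and differs in detail:

    #include <iostream>
    #include <sstream>

    // A stream that logs on destruction and implicitly converts to false, so
    // `return FalseyErrorStream() << msg;` both reports the failure and
    // returns false from a bool-returning function.
    class FalseyErrorStream {
       public:
        FalseyErrorStream() = default;
        ~FalseyErrorStream() { std::cerr << "ERROR: " << mBuffer.str() << std::endl; }
        template <typename T>
        FalseyErrorStream& operator<<(const T& value) {
            mBuffer << value;
            return *this;
        }
        operator bool() const { return false; }  // makes `return stream;` yield false

       private:
        std::ostringstream mBuffer;
    };

    // The while-loop trick lets callers append `<< "details"` after the macro.
    #define RET_CHECK(condition) \
        while (!(condition)) return FalseyErrorStream() << #condition << " failed: "

    bool prepare(int numInputs) {
        RET_CHECK(numInputs == 6) << "expected 6 inputs, got " << numInputs;
        return true;
    }

    int main() {
        prepare(5);  // logs the failure and returns false
        return 0;
    }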
diff --git a/nn/common/operations/LSHProjection.cpp b/nn/common/operations/LSHProjection.cpp
index 9ca8be492..bdb106e18 100644
--- a/nn/common/operations/LSHProjection.cpp
+++ b/nn/common/operations/LSHProjection.cpp
@@ -44,8 +44,12 @@ LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* ope
bool LSHProjection::Prepare(const Operation& operation, RunTimeOperandInfo* operands,
Shape* outputShape) {
- const int num_inputs = NumInputsWithValues(operation, operands);
- NN_CHECK(num_inputs == 3 || num_inputs == 4);
+ // Check that none of the required inputs are omitted.
+ constexpr int requiredInputs[] = {kHashTensor, kInputTensor, kTypeParam};
+ for (const int requiredInput : requiredInputs) {
+ NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+ << "required input " << requiredInput << " is omitted";
+ }
NN_CHECK_EQ(NumOutputs(operation), 1);
const RunTimeOperandInfo* hash = GetInput(operation, operands, kHashTensor);
@@ -56,8 +60,9 @@ bool LSHProjection::Prepare(const Operation& operation, RunTimeOperandInfo* oper
const RunTimeOperandInfo* input = GetInput(operation, operands, kInputTensor);
NN_CHECK(NumDimensions(input) >= 1);
- auto type = static_cast<LSHProjectionType>(
- getScalarData<int32_t>(operands[operation.inputs[kTypeParam]]));
+ const auto& typeOperand = operands[operation.inputs[kTypeParam]];
+ NN_RET_CHECK(typeOperand.length >= sizeof(int32_t));
+ auto type = static_cast<LSHProjectionType>(getScalarData<int32_t>(typeOperand));
switch (type) {
case LSHProjectionType_SPARSE:
case LSHProjectionType_SPARSE_DEPRECATED:
diff --git a/nn/common/operations/LSTM.cpp b/nn/common/operations/LSTM.cpp
index 6020353a3..ba5d46a71 100644
--- a/nn/common/operations/LSTM.cpp
+++ b/nn/common/operations/LSTM.cpp
@@ -83,16 +83,20 @@ LSTMCell::LSTMCell(const Operation& operation, RunTimeOperandInfo* operands) {
output_state_in_ = GetInput(operation, operands, kOutputStateInTensor);
cell_state_in_ = GetInput(operation, operands, kCellStateInTensor);
- params_.activation = static_cast<TfLiteFusedActivation>(
- getScalarData<int32_t>(*GetInput(operation, operands, kActivationParam)));
+ const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+ params_.activation = static_cast<TfLiteFusedActivation>(getScalarDataWithDefault<int32_t>(
+ activationOperand, TfLiteFusedActivation::kTfLiteActNone));
+
+ const auto& cellClipOperand = *GetInput(operation, operands, kCellClipParam);
+ const auto& projClipOperand = *GetInput(operation, operands, kProjClipParam);
if (input_->type == OperandType::TENSOR_FLOAT32) {
- params_.cell_clip = getScalarData<float>(*GetInput(operation, operands, kCellClipParam));
- params_.proj_clip = getScalarData<float>(*GetInput(operation, operands, kProjClipParam));
+ params_.cell_clip = getScalarDataWithDefault<float>(cellClipOperand, 0.0f);
+ params_.proj_clip = getScalarDataWithDefault<float>(projClipOperand, 0.0f);
} else {
- params_.cell_clip = static_cast<float>(
- getScalarData<_Float16>(*GetInput(operation, operands, kCellClipParam)));
- params_.proj_clip = static_cast<float>(
- getScalarData<_Float16>(*GetInput(operation, operands, kProjClipParam)));
+ params_.cell_clip =
+ static_cast<float>(getScalarDataWithDefault<_Float16>(cellClipOperand, 0.0f));
+ params_.proj_clip =
+ static_cast<float>(getScalarDataWithDefault<_Float16>(projClipOperand, 0.0f));
}
// We check the version of LSTM by checking the number of the inputs to the
@@ -302,8 +306,42 @@ bool LSTMCell::Prepare(const Operation& operation, RunTimeOperandInfo* operands,
// Check we have all the inputs and outputs we need.
NN_CHECK(NumInputsWithValues(operation, operands) >= 15 &&
NumInputsWithValues(operation, operands) <= 27);
+ constexpr int requiredInputs[] = {
+ kInputTensor,
+ kInputToForgetWeightsTensor,
+ kInputToCellWeightsTensor,
+ kInputToOutputWeightsTensor,
+ kRecurrentToForgetWeightsTensor,
+ kRecurrentToCellWeightsTensor,
+ kRecurrentToOutputWeightsTensor,
+ kForgetGateBiasTensor,
+ kCellGateBiasTensor,
+ kOutputGateBiasTensor,
+ kOutputStateInTensor,
+ kCellStateInTensor,
+ kActivationParam,
+ kCellClipParam,
+ kProjClipParam,
+ };
+ for (const int requiredInput : requiredInputs) {
+ NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+ << "required input " << requiredInput << " is omitted";
+ }
NN_CHECK_EQ(NumOutputs(operation), 4);
+ // Check that the scalar operands' buffers are large enough.
+ const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+ NN_RET_CHECK(activationOperand.length >= sizeof(int32_t));
+ const auto& cellClipOperand = *GetInput(operation, operands, kCellClipParam);
+ const auto& projClipOperand = *GetInput(operation, operands, kProjClipParam);
+ if (input_->type == OperandType::TENSOR_FLOAT32) {
+ NN_RET_CHECK(cellClipOperand.length >= sizeof(float));
+ NN_RET_CHECK(projClipOperand.length >= sizeof(float));
+ } else {
+ NN_RET_CHECK(cellClipOperand.length >= sizeof(_Float16));
+ NN_RET_CHECK(projClipOperand.length >= sizeof(_Float16));
+ }
+
// Inferring batch size, number of outputs and number of cells from the
// input tensors.
NN_CHECK(NumDimensions(input_) > 1);
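The type branch above matters because these scalars are stored at the tensor's
precision: a half-precision scalar occupies two bytes and a single-precision
one four, so validating against the wrong width would either over-reject valid
models or under-check malformed ones. A quick confirmation, assuming a Clang
toolchain where _Float16 is available (as in Android builds of this code):

    #include <cstdio>

    int main() {
        // Half-precision scalars are 2 bytes, single-precision 4, so the
        // length check in Prepare must match the tensor's element type.
        static_assert(sizeof(_Float16) == 2, "IEEE 754 binary16");
        static_assert(sizeof(float) == 4, "IEEE 754 binary32");
        printf("half=%zu single=%zu\n", sizeof(_Float16), sizeof(float));
        return 0;
    }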
diff --git a/nn/common/operations/RNN.cpp b/nn/common/operations/RNN.cpp
index dbff94f85..259c0915e 100644
--- a/nn/common/operations/RNN.cpp
+++ b/nn/common/operations/RNN.cpp
@@ -51,7 +51,7 @@ bool RNN::Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shap
NNTRACE_TRANS("RNN::Prepare");
// Check we have all the inputs and outputs we need.
const int num_inputs = NumInputsWithValues(operation, operands);
- NN_CHECK(num_inputs == 5 || num_inputs == 6);
+ NN_CHECK(num_inputs == 6);
NN_CHECK_EQ(NumOutputs(operation), 2);
const RunTimeOperandInfo* input = GetInput(operation, operands, kInputTensor);
diff --git a/nn/common/operations/RNN.h b/nn/common/operations/RNN.h
index e8e380a17..245eb1df3 100644
--- a/nn/common/operations/RNN.h
+++ b/nn/common/operations/RNN.h
@@ -37,7 +37,7 @@ class RNN {
bool Eval();
static constexpr int kInputTensor = 0;
- static constexpr int kWeightsTensor = 1; // Optional
+ static constexpr int kWeightsTensor = 1;
static constexpr int kRecurrentWeightsTensor = 2;
static constexpr int kBiasTensor = 3;
static constexpr int kHiddenStateInTensor = 4;
diff --git a/nn/common/operations/SVDF.cpp b/nn/common/operations/SVDF.cpp
index f9fd5581a..12b91f495 100644
--- a/nn/common/operations/SVDF.cpp
+++ b/nn/common/operations/SVDF.cpp
@@ -38,9 +38,11 @@ SVDF::SVDF(const Operation& operation, RunTimeOperandInfo* operands) {
bias_ = GetInput(operation, operands, kBiasTensor);
state_in_ = GetInput(operation, operands, kStateInTensor);
- params_.rank_ = getScalarData<int>(*GetInput(operation, operands, kRankParam));
- params_.activation_ = static_cast<TfLiteFusedActivation>(
- getScalarData<int>(*GetInput(operation, operands, kActivationParam)));
+ const auto& rankOperand = *GetInput(operation, operands, kRankParam);
+ params_.rank_ = getScalarDataWithDefault<int>(rankOperand, 0);
+ const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+ params_.activation_ = static_cast<TfLiteFusedActivation>(getScalarDataWithDefault<int>(
+ activationOperand, TfLiteFusedActivation::kTfLiteActNone));
state_out_ = GetOutput(operation, operands, kStateOutTensor);
output_ = GetOutput(operation, operands, kOutputTensor);
@@ -53,8 +55,22 @@ bool SVDF::Prepare(const Operation& operation, RunTimeOperandInfo* operands, Sha
const int num_inputs = NumInputsWithValues(operation, operands);
NN_CHECK(num_inputs == 6 || num_inputs == 7);
+ constexpr int requiredInputs[] = {
+ kInputTensor, kWeightsFeatureTensor, kWeightsTimeTensor, kStateInTensor,
+ kRankParam, kActivationParam,
+ };
+ for (const int requiredInput : requiredInputs) {
+ NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
+ << "required input " << requiredInput << " is omitted";
+ }
NN_CHECK_EQ(NumOutputs(operation), 2);
+ // Check that the scalar operands' buffers are large enough.
+ const auto& rankOperand = *GetInput(operation, operands, kRankParam);
+ NN_RET_CHECK(rankOperand.length >= sizeof(int));
+ const auto& activationOperand = *GetInput(operation, operands, kActivationParam);
+ NN_RET_CHECK(activationOperand.length >= sizeof(int));
+
const RunTimeOperandInfo* input = GetInput(operation, operands, SVDF::kInputTensor);
const RunTimeOperandInfo* weights_feature =
GetInput(operation, operands, SVDF::kWeightsFeatureTensor);
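Taken together, the SVDF changes illustrate the division of labor described in
the commit message: the constructor tolerates an omitted scalar by substituting
a default, and Prepare is where the omission becomes a hard validation failure.
A condensed sketch of that contract with toy types, not the NNAPI classes:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    struct OperandInfo {
        const uint8_t* buffer = nullptr;
        uint32_t length = 0;
        bool omitted() const { return buffer == nullptr && length == 0; }
    };

    template <typename T>
    T scalarWithDefault(const OperandInfo& info, T defaultValue) {
        if (info.length < sizeof(T)) return defaultValue;
        T value;
        std::memcpy(&value, info.buffer, sizeof(T));
        return value;
    }

    struct ToySvdf {
        int rank = 0;
        // Phase 1: construction never crashes on an omitted scalar.
        explicit ToySvdf(const OperandInfo& rankOperand)
            : rank(scalarWithDefault<int>(rankOperand, 0)) {}
        // Phase 2: Prepare() is where the omission becomes a hard error.
        static bool Prepare(const OperandInfo& rankOperand) {
            if (rankOperand.omitted() || rankOperand.length < sizeof(int)) {
                std::cerr << "required input kRankParam is omitted\n";
                return false;
            }
            return true;
        }
    };

    int main() {
        OperandInfo omitted;                  // model left the rank out
        ToySvdf cell(omitted);                // safe: rank falls back to 0
        bool ok = ToySvdf::Prepare(omitted);  // validation fails cleanly
        std::cout << "rank=" << cell.rank << " prepared=" << ok << "\n";
        return 0;
    }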