author     Slava Shklyaev <slavash@google.com>     2018-10-22 12:04:16 +0100
committer  Xusong Wang <xusongw@google.com>        2018-11-09 20:29:32 -0800
commit     b6d2d99b8ff474952d11564c5d1ff572dadba789 (patch)
tree       2ed822f22684ec73797ffee08135e4fd0f40aa74
parent     543b117ed0e20099a1c9a53693f92c8511591c64 (diff)
Fix incorrect use of NN_CHECK
Fix: 117998554
Test: NeuralNetworksTest_static
Change-Id: I8dc01fa2670a88f978791a68063c4fd703de9dc5
Merged-In: I8dc01fa2670a88f978791a68063c4fd703de9dc5
(cherry picked from commit 661abed76a16518c45f3afd1052515e27ae511d3)
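Context for the fix, with a hedged illustration: NN_CHECK is written for functions that return bool; on failure it logs and executes "return false;". Used inside getNumberOfElements, which returns uint32_t, that "return false" was implicitly converted to 0, so an invalid axis range silently produced an element count of 0 instead of an error. The commit therefore swaps such uses for nnAssert, which aborts instead of returning. The sketch below is a minimal, self-contained illustration; the macro bodies and the numElementsBefore/numElementsAfter functions are simplified stand-ins, not the exact NNAPI definitions.

// Minimal sketch (simplified stand-ins for the real NNAPI macros).
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// NN_CHECK: on failure, returns false from the enclosing function.
// Only meaningful in functions whose return type is bool.
#define NN_CHECK(v)                                            \
    do {                                                       \
        if (!(v)) {                                            \
            std::fprintf(stderr, "NN_CHECK failed: %s\n", #v); \
            return false;                                      \
        }                                                      \
    } while (0)

// nnAssert: on failure, aborts the process. Safe in any function.
#define nnAssert(v)                                            \
    do {                                                       \
        if (!(v)) {                                            \
            std::fprintf(stderr, "nnAssert failed: %s\n", #v); \
            std::abort();                                      \
        }                                                      \
    } while (0)

struct Shape {
    std::vector<uint32_t> dimensions;
};

// Before: NN_CHECK's "return false" converts to uint32_t 0, so a bad
// range silently yields a zero element count.
uint32_t numElementsBefore(const Shape& shape, size_t lastAxisExclusive) {
    NN_CHECK(lastAxisExclusive <= shape.dimensions.size());
    uint32_t count = 1;
    for (size_t i = 0; i < lastAxisExclusive; i++) count *= shape.dimensions[i];
    return count;
}

// After: the same violation fails loudly.
uint32_t numElementsAfter(const Shape& shape, size_t lastAxisExclusive) {
    nnAssert(lastAxisExclusive <= shape.dimensions.size());
    uint32_t count = 1;
    for (size_t i = 0; i < lastAxisExclusive; i++) count *= shape.dimensions[i];
    return count;
}

int main() {
    Shape s{{2, 3, 4}};
    std::printf("%u\n", numElementsBefore(s, 5));  // prints 0; the error is swallowed
    std::printf("%u\n", numElementsAfter(s, 3));   // prints 24
    // numElementsAfter(s, 5) would abort instead of returning 0.
    return 0;
}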
-rw-r--r--  nn/common/OperationsUtils.cpp            29
-rw-r--r--  nn/common/include/OperationsUtils.h       7
-rw-r--r--  nn/common/operations/Activation.cpp       4
-rw-r--r--  nn/common/operations/ArgMinMax.cpp        2
-rw-r--r--  nn/common/operations/ChannelShuffle.cpp   2
-rw-r--r--  nn/common/operations/ExpandDims.cpp       2
-rw-r--r--  nn/common/operations/Gather.cpp           4
-rw-r--r--  nn/common/operations/Normalization.cpp    4
-rw-r--r--  nn/common/operations/Split.cpp            2
9 files changed, 27 insertions(+), 29 deletions(-)
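The other half of the change applies the same logic at call sites: the removed getDimensionIndex embedded an NN_OPS_CHECK whose "return false" converted to int32_t 0, so an out-of-range axis was silently remapped to axis 0. Its replacement, handleNegativeAxis, reports validity through its bool return value and resolves the axis through an out-parameter, so each bool-returning caller can propagate the failure with NN_CHECK(handleNegativeAxis(shape, &axis)), as the hunks below do. Here is a minimal, self-contained sketch of the new pattern; the Shape struct, the simplified NN_CHECK body, and examplePrepare are illustrative assumptions, not the exact NNAPI sources.

// Call-site sketch (simplified stand-ins, not the exact NNAPI sources).
#include <cstdint>
#include <cstdio>
#include <vector>

#define NN_CHECK(v)                 \
    do {                            \
        if (!(v)) return false;     \
    } while (0)

struct Shape {
    std::vector<uint32_t> dimensions;
};

// The helper introduced by this commit: validates the axis against the
// rank and folds negative values into [0, numberOfDimensions).
bool handleNegativeAxis(int32_t numberOfDimensions, int32_t* axis) {
    NN_CHECK(-numberOfDimensions <= *axis && *axis < numberOfDimensions);
    if (*axis < 0) {
        *axis += numberOfDimensions;
    }
    return true;
}

// Hypothetical prepare function showing the migration applied below:
//   old: axis = getDimensionIndex(input, axis);   // failure silently became axis 0
//   new: NN_CHECK(handleNegativeAxis(input, &axis));
bool examplePrepare(const Shape& input, int32_t axis) {
    NN_CHECK(handleNegativeAxis(static_cast<int32_t>(input.dimensions.size()), &axis));
    std::printf("resolved axis: %d\n", axis);
    return true;
}

int main() {
    Shape s{{2, 3, 4}};
    examplePrepare(s, -1);                      // resolves -1 to axis 2
    if (!examplePrepare(s, 7)) {
        std::printf("out-of-range axis rejected\n");
    }
    return 0;
}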
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index e212a8991..1c9411aec 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -56,7 +56,9 @@ uint32_t getNumberOfElements(const Shape& shape) {
 uint32_t getNumberOfElements(const Shape& shape,
                              size_t firstAxisInclusive,
                              size_t lastAxisExclusive) {
-    NN_CHECK(lastAxisExclusive <= shape.dimensions.size());
+    nnAssert(0 <= firstAxisInclusive);
+    nnAssert(firstAxisInclusive <= lastAxisExclusive);
+    nnAssert(lastAxisExclusive <= shape.dimensions.size());
     uint32_t count = 1;
     for (size_t i = firstAxisInclusive; i < lastAxisExclusive; i++) {
         count *= shape.dimensions[i];
@@ -69,23 +71,16 @@ uint32_t getNumberOfDimensions(const Shape& shape) {
 }
 
 uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
-    if (dimensionIdx >= shape.dimensions.size()) {
-        // TODO, log the error
-        return 0;
-    }
+    nnAssert(0 <= dimensionIdx && dimensionIdx < shape.dimensions.size());
     return shape.dimensions[dimensionIdx];
 }
 
-int32_t getDimensionIndex(int32_t numberOfDimensions, int32_t axis) {
-    NN_OPS_CHECK(-numberOfDimensions <= axis && axis < numberOfDimensions);
-    if (axis < 0) {
-        axis += numberOfDimensions;
+bool handleNegativeAxis(int32_t numberOfDimensions, int32_t* axis) {
+    NN_CHECK(-numberOfDimensions <= *axis && *axis < numberOfDimensions);
+    if (*axis < 0) {
+        *axis += numberOfDimensions;
     }
-    return axis;
-}
-
-int32_t getDimensionIndex(const Shape& shape, int32_t axis) {
-    return getDimensionIndex(getNumberOfDimensions(shape), axis);
+    return true;
 }
 
 bool QuantizeMultiplierSmallerThanOne(double double_multiplier,
@@ -921,7 +916,7 @@ bool stridedSlicePrepare(const Shape& input,
 }
 
 bool argMinMaxPrepare(const Shape& input, int32_t axis, Shape* output) {
-    axis = getDimensionIndex(input, axis);
+    NN_CHECK(handleNegativeAxis(input, &axis));
 
     output->type = OperandType::TENSOR_INT32;
@@ -940,7 +935,7 @@ bool argMinMaxPrepare(const Shape& input, int32_t axis, Shape* output) {
 bool splitPrepare(const Shape& input, int32_t axis, int32_t numOutputs,
                   std::vector<Shape>* output) {
-    axis = getDimensionIndex(input, axis);
+    NN_CHECK(handleNegativeAxis(input, &axis));
 
     const int32_t sizeOfAxisToSplit = input.dimensions[axis];
     NN_OPS_CHECK(sizeOfAxisToSplit % numOutputs == 0);
@@ -1071,7 +1066,7 @@ bool groupedConvPrepare(const Shape& input, const Shape& filter, const Shape& bi
 }
 
 bool channelShufflePrepare(const Shape& input, int32_t numGroups, int32_t axis, Shape* output) {
-    axis = getDimensionIndex(input, axis);
+    NN_CHECK(handleNegativeAxis(input, &axis));
     NN_OPS_CHECK(numGroups > 0);
     NN_OPS_CHECK(getSizeOfDimension(input, axis) % numGroups == 0);
     output->type = input.type;
diff --git a/nn/common/include/OperationsUtils.h b/nn/common/include/OperationsUtils.h
index ff5b348d1..8e60790b1 100644
--- a/nn/common/include/OperationsUtils.h
+++ b/nn/common/include/OperationsUtils.h
@@ -71,8 +71,11 @@ uint32_t getNumberOfDimensions(const Shape& shape);
 uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
 
 // Converts an axis index from the range [-dims, dims) into the range [0, dims).
-int32_t getDimensionIndex(const Shape& shape, int32_t axis);
-int32_t getDimensionIndex(int32_t numberOfDimensions, int32_t axis);
+bool handleNegativeAxis(int32_t numberOfDimensions, int32_t* axis);
+
+inline bool handleNegativeAxis(const Shape& shape, int32_t* axis) {
+    return handleNegativeAxis(getNumberOfDimensions(shape), axis);
+}
 
 inline uint32_t computeOutSize(uint32_t imageSize, uint32_t filterSize, uint32_t stride,
                                uint32_t paddingHead, uint32_t paddingTail) {
diff --git a/nn/common/operations/Activation.cpp b/nn/common/operations/Activation.cpp
index 74f10f7d0..d6adeea5e 100644
--- a/nn/common/operations/Activation.cpp
+++ b/nn/common/operations/Activation.cpp
@@ -110,7 +110,7 @@ inline bool softmaxFloat32Impl(const float* inputData, const Shape& inputShape,
 bool softmaxFloat32(const float* inputData, const Shape& inputShape, const float beta, int32_t axis,
                     float* outputData, const Shape& outputShape) {
     int32_t ndim = getNumberOfDimensions(inputShape);
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
     // TFLite optimized implementation only supports computation along the last axis
     if (axis == ndim - 1) {
         NNTRACE_COMP("optimized_ops::Softmax::float");
@@ -311,7 +311,7 @@ bool softmaxQuant8Impl(const uint8_t* inputData, const Shape& inputShape, const
 bool softmaxQuant8(const uint8_t* inputData, const Shape& inputShape, const float beta,
                    int32_t axis, uint8_t* outputData, const Shape& outputShape) {
     int32_t ndim = getNumberOfDimensions(inputShape);
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
 
     if (outputShape.offset != 0 || outputShape.scale != 1.f / 256) {
         LOG(ERROR) << "incorrect scale / offset for output";
diff --git a/nn/common/operations/ArgMinMax.cpp b/nn/common/operations/ArgMinMax.cpp
index 8d0dcef18..87e6d3c63 100644
--- a/nn/common/operations/ArgMinMax.cpp
+++ b/nn/common/operations/ArgMinMax.cpp
@@ -56,7 +56,7 @@ bool argMinMaxGeneric(const uint8_t* inputData, const Shape& inputShape,
                       int32 axis, bool isArgMin,
                       uint8_t* outputData, const Shape& outputShape) {
     NNTRACE_TRANS("argMinMaxGeneric");
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
 
 #define NNAPI_IMPL_ARG_MIN_MAX(operandType, dataType) \
     if (inputShape.type == operandType) {             \
diff --git a/nn/common/operations/ChannelShuffle.cpp b/nn/common/operations/ChannelShuffle.cpp
index ae1257d4c..36fce51c8 100644
--- a/nn/common/operations/ChannelShuffle.cpp
+++ b/nn/common/operations/ChannelShuffle.cpp
@@ -47,7 +47,7 @@ inline bool channelShuffleImpl(const T* inputData, const Shape& inputShape, int3
 bool channelShuffleGeneric(const uint8_t* inputData, const Shape& inputShape, int32_t numGroups,
                            int32_t axis, uint8_t* outputData, const Shape& outputShape) {
     NNTRACE_TRANS("channelShuffleGeneric");
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
     if (inputShape.type == OperandType::TENSOR_FLOAT32) {
         return channelShuffleImpl<float>(reinterpret_cast<const float*>(inputData), inputShape,
                                          numGroups, axis, reinterpret_cast<float*>(outputData),
diff --git a/nn/common/operations/ExpandDims.cpp b/nn/common/operations/ExpandDims.cpp
index 6503407bd..573a467e5 100644
--- a/nn/common/operations/ExpandDims.cpp
+++ b/nn/common/operations/ExpandDims.cpp
@@ -23,7 +23,7 @@ namespace nn {
 namespace expand_dims {
 
 bool prepare(const Shape& input, int32_t axis, Shape* output) {
-    axis = getDimensionIndex(getNumberOfDimensions(input) + 1, axis);
+    NN_CHECK(handleNegativeAxis(getNumberOfDimensions(input) + 1, &axis));
 
     output->type = input.type;
     output->offset = input.offset;
diff --git a/nn/common/operations/Gather.cpp b/nn/common/operations/Gather.cpp
index a95f7d559..8f5e38b3f 100644
--- a/nn/common/operations/Gather.cpp
+++ b/nn/common/operations/Gather.cpp
@@ -48,7 +48,7 @@ inline bool gatherImpl(const T* inputData, const Shape& inputShape, int32_t axis
 } // namespace
 
 bool prepare(const Shape& input, int32_t axis, const Shape& indices, Shape* output) {
-    axis = getDimensionIndex(input, axis);
+    NN_CHECK(handleNegativeAxis(input, &axis));
     output->dimensions.clear();
     output->dimensions.reserve(getNumberOfDimensions(input) + getNumberOfDimensions(indices) - 1);
     output->dimensions.insert(output->dimensions.end(), input.dimensions.begin(),
@@ -64,7 +64,7 @@ bool compute(const uint8_t* inputData, const Shape& inputShape, int32_t axis,
              const int32_t* indicesData, const Shape& indicesShape, uint8_t* outputData,
              const Shape& outputShape) {
     NNTRACE_TRANS("gather::compute");
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
 
 #define ANDROID_NN_GATHER(operandType, dataType) \
     case operandType: {                          \
diff --git a/nn/common/operations/Normalization.cpp b/nn/common/operations/Normalization.cpp
index c963f0c19..138a3c5ec 100644
--- a/nn/common/operations/Normalization.cpp
+++ b/nn/common/operations/Normalization.cpp
@@ -56,7 +56,7 @@ inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, i
 bool l2normFloat32(const float* inputData, const Shape& inputShape, int32_t axis, float* outputData,
                    const Shape& outputShape) {
     int32_t ndim = getNumberOfDimensions(inputShape);
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
     // TFLite optimized implementation only supports computation along the last axis
     if (axis == ndim - 1) {
         NNTRACE_COMP("optimized_ops::L2Normalization::float");
@@ -103,7 +103,7 @@ bool localResponseNormFloat32(const float* inputData, const Shape& inputShape, i
                               float bias, float alpha, float beta, int32_t axis, float* outputData,
                               const Shape& outputShape) {
     int32_t ndim = getNumberOfDimensions(inputShape);
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
     // TFLite optimized implementation only supports computation along the last axis
     if (axis == ndim - 1) {
         NNTRACE_COMP("optimized_ops::LocalResponseNormalization::float");
diff --git a/nn/common/operations/Split.cpp b/nn/common/operations/Split.cpp
index 290e2c8fe..c6276a6bf 100644
--- a/nn/common/operations/Split.cpp
+++ b/nn/common/operations/Split.cpp
@@ -28,7 +28,7 @@ template <typename Scalar>
 bool splitGeneric(const Scalar* inputData, const Shape& inputShape, int32_t axis,
                   const std::vector<Scalar*>* outputDataPtrs,
                   const std::vector<Shape>& outputShapes) {
-    axis = getDimensionIndex(inputShape, axis);
+    NN_CHECK(handleNegativeAxis(inputShape, &axis));
     int outerSize = 1;
     for (int i = 0; i < axis; ++i) {
         outerSize *= inputShape.dimensions[i];