summaryrefslogtreecommitdiff
path: root/nn/common/operations/Quantize.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'nn/common/operations/Quantize.cpp')
-rw-r--r--  nn/common/operations/Quantize.cpp  10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/nn/common/operations/Quantize.cpp b/nn/common/operations/Quantize.cpp
index fa04bdd01..943a33dcb 100644
--- a/nn/common/operations/Quantize.cpp
+++ b/nn/common/operations/Quantize.cpp
@@ -17,7 +17,6 @@
#include "OperationsUtils.h"
#define LOG_TAG "Operations"
-#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"
@@ -37,8 +36,6 @@ constexpr uint32_t kOutputTensor = 0;
namespace {
-using namespace hal;
-
template <typename T>
bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {
NNTRACE_COMP("quantizeToQuant8");
@@ -75,10 +72,10 @@ bool validate(const IOperationValidationContext* context) {
NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
inputType == OperandType::TENSOR_FLOAT32)
- << "Unsupported input operand type for QUANTIZE op: " << toString(inputType);
+ << "Unsupported input operand type for QUANTIZE op: " << inputType;
NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM ||
outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
- << "Unsupported output operand type for QUANTIZE op: " << toString(outputType);
+ << "Unsupported output operand type for QUANTIZE op: " << outputType;
if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
return validateHalVersion(context, HalVersion::V1_3);
} else {
@@ -121,8 +118,7 @@ bool execute(IOperationExecutionContext* context) {
}
}
NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. (input type: "
- << toString(inputType)
- << " output type: " << toString(context->getOutputType(kOutputTensor))
+ << inputType << " output type: " << context->getOutputType(kOutputTensor)
<< ")";
}