aboutsummaryrefslogtreecommitdiff
path: root/eval
diff options
context:
space:
mode:
authorMarat Dukhan <maratek@google.com>2020-06-08 01:24:01 -0700
committerXNNPACK Team <xnnpack-github-robot@google.com>2020-06-08 01:24:50 -0700
commitd310214fc895583281cd5a83d32dcb0c1f33f984 (patch)
tree3a0cdd5fad04f05860cfde125b0c35888b65d437 /eval
parent2b9efd8fb7df7a989e98df704afd07ca3664d5d8 (diff)
downloadXNNPACK-d310214fc895583281cd5a83d32dcb0c1f33f984.tar.gz
Fix bugs in ROUND evaluation tests
Fix bugs in test cases for +-infinity PiperOrigin-RevId: 315229899
Diffstat (limited to 'eval')
-rw-r--r--eval/f32-roundd.cc36
-rw-r--r--eval/f32-roundne.cc28
-rw-r--r--eval/f32-roundu.cc36
-rw-r--r--eval/f32-roundz.cc36
4 files changed, 68 insertions, 68 deletions
diff --git a/eval/f32-roundd.cc b/eval/f32-roundd.cc
index cbd0b227c..96debd652 100644
--- a/eval/f32-roundd.cc
+++ b/eval/f32-roundd.cc
@@ -159,7 +159,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__SSE_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -171,7 +171,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__SSE_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -425,7 +425,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__SSE2_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -437,7 +437,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__SSE2_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -691,7 +691,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__SSE41, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -703,7 +703,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__SSE41, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -957,7 +957,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__NEON_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -969,7 +969,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__NEON_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1223,7 +1223,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__NEON_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1235,7 +1235,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__NEON_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1489,7 +1489,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__NEONV8, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1501,7 +1501,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__NEONV8, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1755,7 +1755,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__PSIMD_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1767,7 +1767,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDD__PSIMD_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2020,7 +2020,7 @@ TEST(ROUNDD__SCALAR_ADDSUB, negative_integral) {
TEST(ROUNDD__SCALAR_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2032,7 +2032,7 @@ TEST(ROUNDD__SCALAR_ADDSUB, positive_infinity) {
TEST(ROUNDD__SCALAR_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2284,7 +2284,7 @@ TEST(ROUNDD__SCALAR_CVT, negative_integral) {
TEST(ROUNDD__SCALAR_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2296,7 +2296,7 @@ TEST(ROUNDD__SCALAR_CVT, positive_infinity) {
TEST(ROUNDD__SCALAR_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundd__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::floor(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
diff --git a/eval/f32-roundne.cc b/eval/f32-roundne.cc
index 50c028ce6..debb70d0f 100644
--- a/eval/f32-roundne.cc
+++ b/eval/f32-roundne.cc
@@ -99,7 +99,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__SSE_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -111,7 +111,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__SSE_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -305,7 +305,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__SSE2_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -317,7 +317,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__SSE2_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -511,7 +511,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__SSE41, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -523,7 +523,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__SSE41, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -717,7 +717,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__NEON_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -729,7 +729,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__NEON_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -923,7 +923,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__NEONV8, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -935,7 +935,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__NEONV8, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1129,7 +1129,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__PSIMD_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1141,7 +1141,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDNE__PSIMD_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1334,7 +1334,7 @@ TEST(ROUNDNE__SCALAR_ADDSUB, negative_integral) {
TEST(ROUNDNE__SCALAR_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1346,7 +1346,7 @@ TEST(ROUNDNE__SCALAR_ADDSUB, positive_infinity) {
TEST(ROUNDNE__SCALAR_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundne__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::nearbyint(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
diff --git a/eval/f32-roundu.cc b/eval/f32-roundu.cc
index 3f6c92fda..8e5af7856 100644
--- a/eval/f32-roundu.cc
+++ b/eval/f32-roundu.cc
@@ -159,7 +159,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__SSE_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -171,7 +171,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__SSE_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -425,7 +425,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__SSE2_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -437,7 +437,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__SSE2_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -691,7 +691,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__SSE41, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -703,7 +703,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__SSE41, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -957,7 +957,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__NEON_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -969,7 +969,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__NEON_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1223,7 +1223,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__NEON_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1235,7 +1235,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__NEON_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1489,7 +1489,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__NEONV8, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1501,7 +1501,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__NEONV8, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1755,7 +1755,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__PSIMD_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1767,7 +1767,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDU__PSIMD_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2020,7 +2020,7 @@ TEST(ROUNDU__SCALAR_ADDSUB, negative_integral) {
TEST(ROUNDU__SCALAR_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2032,7 +2032,7 @@ TEST(ROUNDU__SCALAR_ADDSUB, positive_infinity) {
TEST(ROUNDU__SCALAR_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2284,7 +2284,7 @@ TEST(ROUNDU__SCALAR_CVT, negative_integral) {
TEST(ROUNDU__SCALAR_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -2296,7 +2296,7 @@ TEST(ROUNDU__SCALAR_CVT, positive_infinity) {
TEST(ROUNDU__SCALAR_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundu__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::ceil(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
diff --git a/eval/f32-roundz.cc b/eval/f32-roundz.cc
index 8935537d2..d8977154d 100644
--- a/eval/f32-roundz.cc
+++ b/eval/f32-roundz.cc
@@ -99,7 +99,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__SSE_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -111,7 +111,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__SSE_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -305,7 +305,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__SSE2_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -317,7 +317,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__SSE2_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__sse2_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -511,7 +511,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__SSE41, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -523,7 +523,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__SSE41, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__sse41(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -717,7 +717,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__NEON_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -729,7 +729,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__NEON_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__neon_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -923,7 +923,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__NEON_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -935,7 +935,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__NEON_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__neon_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1129,7 +1129,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__NEONV8, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1141,7 +1141,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__NEONV8, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__neonv8(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1335,7 +1335,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__PSIMD_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1347,7 +1347,7 @@ constexpr int kBlockSize = 1024;
TEST(ROUNDZ__PSIMD_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__psimd_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1540,7 +1540,7 @@ TEST(ROUNDZ__SCALAR_ADDSUB, negative_integral) {
TEST(ROUNDZ__SCALAR_ADDSUB, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1552,7 +1552,7 @@ TEST(ROUNDZ__SCALAR_ADDSUB, positive_infinity) {
TEST(ROUNDZ__SCALAR_ADDSUB, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1744,7 +1744,7 @@ TEST(ROUNDZ__SCALAR_CVT, negative_integral) {
TEST(ROUNDZ__SCALAR_CVT, positive_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0x7F800000));
+ std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))
@@ -1756,7 +1756,7 @@ TEST(ROUNDZ__SCALAR_CVT, positive_infinity) {
TEST(ROUNDZ__SCALAR_CVT, negative_infinity) {
std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);
- std::fill(inputs.begin(), inputs.end(), UINT32_C(0xFF800000));
+ std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
xnn_math_f32_roundz__scalar_cvt(kBlockSize * sizeof(float), inputs.data(), outputs.data());
const uint32_t reference_output = fp32_to_bits(std::trunc(inputs[0]));
ASSERT_EQ(reference_output, fp32_to_bits(outputs[0]))