Diffstat (limited to 'unsupported/test/cxx11_tensor_reduction.cpp')
-rw-r--r--  unsupported/test/cxx11_tensor_reduction.cpp | 88
1 file changed, 56 insertions(+), 32 deletions(-)
diff --git a/unsupported/test/cxx11_tensor_reduction.cpp b/unsupported/test/cxx11_tensor_reduction.cpp
index 1490ec3da..c46c4c91d 100644
--- a/unsupported/test/cxx11_tensor_reduction.cpp
+++ b/unsupported/test/cxx11_tensor_reduction.cpp
@@ -53,20 +53,22 @@ static void test_trivial_reductions() {
   }
 }
 
-template <int DataLayout>
+template <typename Scalar,int DataLayout>
 static void test_simple_reductions() {
-  Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
+  Tensor<Scalar, 4, DataLayout> tensor(2, 3, 5, 7);
   tensor.setRandom();
+  // Add a little offset so that the product reductions won't be close to zero.
+  tensor += tensor.constant(Scalar(0.5f));
   array<ptrdiff_t, 2> reduction_axis2;
   reduction_axis2[0] = 1;
   reduction_axis2[1] = 3;
 
-  Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis2);
+  Tensor<Scalar, 2, DataLayout> result = tensor.sum(reduction_axis2);
   VERIFY_IS_EQUAL(result.dimension(0), 2);
   VERIFY_IS_EQUAL(result.dimension(1), 5);
   for (int i = 0; i < 2; ++i) {
     for (int j = 0; j < 5; ++j) {
-      float sum = 0.0f;
+      Scalar sum = Scalar(0.0f);
       for (int k = 0; k < 3; ++k) {
         for (int l = 0; l < 7; ++l) {
           sum += tensor(i, k, j, l);
@@ -77,7 +79,7 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 0, DataLayout> sum1 = tensor.sum();
+    Tensor<Scalar, 0, DataLayout> sum1 = tensor.sum();
     VERIFY_IS_EQUAL(sum1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
@@ -85,7 +87,7 @@ static void test_simple_reductions() {
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
+    Tensor<Scalar, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
     VERIFY_IS_EQUAL(sum2.rank(), 0);
 
     VERIFY_IS_APPROX(sum1(), sum2());
@@ -98,7 +100,7 @@ static void test_simple_reductions() {
   VERIFY_IS_EQUAL(result.dimension(1), 7);
   for (int i = 0; i < 3; ++i) {
     for (int j = 0; j < 7; ++j) {
-      float prod = 1.0f;
+      Scalar prod = Scalar(1.0f);
       for (int k = 0; k < 2; ++k) {
         for (int l = 0; l < 5; ++l) {
           prod *= tensor(k, i, l, j);
@@ -109,7 +111,7 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 0, DataLayout> prod1 = tensor.prod();
+    Tensor<Scalar, 0, DataLayout> prod1 = tensor.prod();
     VERIFY_IS_EQUAL(prod1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
@@ -117,7 +119,7 @@ static void test_simple_reductions() {
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
+    Tensor<Scalar, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
     VERIFY_IS_EQUAL(prod2.rank(), 0);
 
     VERIFY_IS_APPROX(prod1(), prod2());
@@ -130,7 +132,7 @@ static void test_simple_reductions() {
   VERIFY_IS_EQUAL(result.dimension(1), 7);
   for (int i = 0; i < 3; ++i) {
     for (int j = 0; j < 7; ++j) {
-      float max_val = std::numeric_limits<float>::lowest();
+      Scalar max_val = std::numeric_limits<Scalar>::lowest();
       for (int k = 0; k < 2; ++k) {
         for (int l = 0; l < 5; ++l) {
           max_val = (std::max)(max_val, tensor(k, i, l, j));
@@ -141,7 +143,7 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 0, DataLayout> max1 = tensor.maximum();
+    Tensor<Scalar, 0, DataLayout> max1 = tensor.maximum();
     VERIFY_IS_EQUAL(max1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
@@ -149,7 +151,7 @@ static void test_simple_reductions() {
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
+    Tensor<Scalar, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
     VERIFY_IS_EQUAL(max2.rank(), 0);
 
     VERIFY_IS_APPROX(max1(), max2());
@@ -162,7 +164,7 @@ static void test_simple_reductions() {
   VERIFY_IS_EQUAL(result.dimension(1), 7);
   for (int i = 0; i < 5; ++i) {
     for (int j = 0; j < 7; ++j) {
-      float min_val = (std::numeric_limits<float>::max)();
+      Scalar min_val = (std::numeric_limits<Scalar>::max)();
       for (int k = 0; k < 2; ++k) {
         for (int l = 0; l < 3; ++l) {
           min_val = (std::min)(min_val, tensor(k, l, i, j));
@@ -173,7 +175,7 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 0, DataLayout> min1 = tensor.minimum();
+    Tensor<Scalar, 0, DataLayout> min1 = tensor.minimum();
     VERIFY_IS_EQUAL(min1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
@@ -181,7 +183,7 @@ static void test_simple_reductions() {
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
+    Tensor<Scalar, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
     VERIFY_IS_EQUAL(min2.rank(), 0);
 
     VERIFY_IS_APPROX(min1(), min2());
@@ -194,7 +196,7 @@ static void test_simple_reductions() {
   VERIFY_IS_EQUAL(result.dimension(1), 7);
   for (int i = 0; i < 5; ++i) {
     for (int j = 0; j < 7; ++j) {
-      float sum = 0.0f;
+      Scalar sum = Scalar(0.0f);
       int count = 0;
       for (int k = 0; k < 2; ++k) {
         for (int l = 0; l < 3; ++l) {
@@ -202,12 +204,12 @@ static void test_simple_reductions() {
           ++count;
         }
       }
-      VERIFY_IS_APPROX(result(i, j), sum / count);
+      VERIFY_IS_APPROX(result(i, j), sum / Scalar(count));
     }
   }
 
   {
-    Tensor<float, 0, DataLayout> mean1 = tensor.mean();
+    Tensor<Scalar, 0, DataLayout> mean1 = tensor.mean();
     VERIFY_IS_EQUAL(mean1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
@@ -215,7 +217,7 @@ static void test_simple_reductions() {
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
+    Tensor<Scalar, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
    VERIFY_IS_EQUAL(mean2.rank(), 0);
 
     VERIFY_IS_APPROX(mean1(), mean2());
@@ -225,11 +227,11 @@ static void test_simple_reductions() {
     Tensor<int, 1> ints(10);
     std::iota(ints.data(), ints.data() + ints.dimension(0), 0);
 
-    TensorFixedSize<bool, Sizes<> > all;
-    all = ints.all();
-    VERIFY(!all());
-    all = (ints >= ints.constant(0)).all();
-    VERIFY(all());
+    TensorFixedSize<bool, Sizes<> > all_;
+    all_ = ints.all();
+    VERIFY(!all_());
+    all_ = (ints >= ints.constant(0)).all();
+    VERIFY(all_());
 
     TensorFixedSize<bool, Sizes<> > any;
     any = (ints > ints.constant(10)).any();
@@ -368,7 +370,7 @@ static void test_static_dims() {
   Tensor<float, 2, DataLayout> out(72, 97);
   in.setRandom();
 
-#if !EIGEN_HAS_CONSTEXPR
+#if !EIGEN_HAS_CONSTEXPR
   array<int, 2> reduction_axis;
   reduction_axis[0] = 1;
   reduction_axis[1] = 3;
@@ -386,7 +388,7 @@ static void test_static_dims() {
           expected = (std::max)(expected, in(i, k, j, l));
         }
       }
-      VERIFY_IS_APPROX(out(i, j), expected);
+      VERIFY_IS_EQUAL(out(i, j), expected);
     }
   }
 }
@@ -417,7 +419,7 @@ static void test_innermost_last_dims() {
          expected = (std::max)(expected, in(l, k, i, j));
        }
      }
-      VERIFY_IS_APPROX(out(i, j), expected);
+      VERIFY_IS_EQUAL(out(i, j), expected);
     }
   }
 }
@@ -448,7 +450,7 @@ static void test_innermost_first_dims() {
          expected = (std::max)(expected, in(i, j, k, l));
        }
      }
-      VERIFY_IS_APPROX(out(i, j), expected);
+      VERIFY_IS_EQUAL(out(i, j), expected);
     }
   }
 }
@@ -479,16 +481,37 @@ static void test_reduce_middle_dims() {
          expected = (std::max)(expected, in(i, k, l, j));
        }
      }
-      VERIFY_IS_APPROX(out(i, j), expected);
+      VERIFY_IS_EQUAL(out(i, j), expected);
+    }
+  }
+}
+
+static void test_sum_accuracy() {
+  Tensor<float, 3> tensor(101, 101, 101);
+  for (float prescribed_mean : {1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f}) {
+    tensor.setRandom();
+    tensor += tensor.constant(prescribed_mean);
+
+    Tensor<float, 0> sum = tensor.sum();
+    double expected_sum = 0.0;
+    for (int i = 0; i < 101; ++i) {
+      for (int j = 0; j < 101; ++j) {
+        for (int k = 0; k < 101; ++k) {
+          expected_sum += static_cast<double>(tensor(i, j, k));
+        }
+      }
     }
+    VERIFY_IS_APPROX(sum(), static_cast<float>(expected_sum));
   }
 }
 
-void test_cxx11_tensor_reduction() {
+EIGEN_DECLARE_TEST(cxx11_tensor_reduction) {
   CALL_SUBTEST(test_trivial_reductions<ColMajor>());
   CALL_SUBTEST(test_trivial_reductions<RowMajor>());
-  CALL_SUBTEST(test_simple_reductions<ColMajor>());
-  CALL_SUBTEST(test_simple_reductions<RowMajor>());
+  CALL_SUBTEST(( test_simple_reductions<float,ColMajor>() ));
+  CALL_SUBTEST(( test_simple_reductions<float,RowMajor>() ));
+  CALL_SUBTEST(( test_simple_reductions<Eigen::half,ColMajor>() ));
+  CALL_SUBTEST(( test_simple_reductions<Eigen::bfloat16,ColMajor>() ));
   CALL_SUBTEST(test_reductions_in_expr<ColMajor>());
   CALL_SUBTEST(test_reductions_in_expr<RowMajor>());
   CALL_SUBTEST(test_full_reductions<ColMajor>());
@@ -505,4 +528,5 @@ void test_cxx11_tensor_reduction() {
   CALL_SUBTEST(test_innermost_first_dims<RowMajor>());
   CALL_SUBTEST(test_reduce_middle_dims<ColMajor>());
   CALL_SUBTEST(test_reduce_middle_dims<RowMajor>());
+  CALL_SUBTEST(test_sum_accuracy());
 }
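
Note on the new offset in test_simple_reductions: setRandom() on a float-valued tensor draws entries roughly uniform in [-1, 1], so a product over many factors collapses toward zero, where the relative tolerance of VERIFY_IS_APPROX can no longer separate a correct result from a wrong one. Shifting every entry by Scalar(0.5f) biases the factors away from zero. A standalone sketch of the effect, not part of the commit (the 3x5 shape is arbitrary):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(3, 5);
  t.setRandom();                        // entries roughly uniform in [-1, 1]
  Eigen::Tensor<float, 0> p0 = t.prod();

  t += t.constant(0.5f);                // same offset trick as the test
  Eigen::Tensor<float, 0> p1 = t.prod();

  // With many factors in [-1, 1] the product shrinks toward zero;
  // the offset keeps it better separated from zero.
  std::cout << "without offset: " << p0() << "\n"
            << "with offset:    " << p1() << "\n";
}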
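
The VERIFY_IS_APPROX to VERIFY_IS_EQUAL changes in the max-reduction tests are safe because maximum() performs no arithmetic: it only selects an element of the input, so the reduction result should match the hand-computed reference bit for bit. A tiny illustration, assuming nothing beyond the standard library:

#include <algorithm>
#include <cassert>

int main() {
  float a = 0.1f, b = 0.2f;
  // std::max only selects one of its arguments; no rounding occurs,
  // so comparing the result with == is exact and safe here.
  float m = (std::max)(a, b);
  assert(m == a || m == b);
  return 0;
}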
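
The new test_sum_accuracy compares Eigen's single-precision sum() against a reference accumulated in double. The double reference is needed because a naive float accumulator degrades visibly at this scale: with 101^3 (about 1.03e6) terms of mean 10000, the running total reaches about 1e10, where one float ulp is 1024, so individual additions can drop their entire fractional part. A standalone sketch of that failure mode, with an assumed uniform noise model standing in for setRandom():

#include <iostream>
#include <random>

int main() {
  std::mt19937 gen(42);
  std::uniform_real_distribution<float> noise(-1.0f, 1.0f);
  const int n = 101 * 101 * 101;        // same element count as the test
  float  sum_f = 0.0f;                  // naive single-precision accumulator
  double sum_d = 0.0;                   // double reference, as in the test
  for (int i = 0; i < n; ++i) {
    float v = 10000.0f + noise(gen);    // prescribed mean of 10000
    sum_f += v;
    sum_d += static_cast<double>(v);
  }
  std::cout << "float:  " << sum_f << "\n"
            << "double: " << sum_d << "\n"
            << "rel. error of float: " << (sum_d - sum_f) / sum_d << "\n";
}

That the test's float sum() still passes VERIFY_IS_APPROX against the double reference suggests Eigen's reduction does better than strictly sequential accumulation, for example by keeping several partial sums in vector lanes.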
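
The doubled parentheses in the new CALL_SUBTEST(( test_simple_reductions<float,ColMajor>() )) calls are deliberate: the preprocessor splits macro arguments on commas that are not enclosed in parentheses, and angle brackets offer no protection, so the comma in the template argument list would otherwise look like a second macro argument. A minimal sketch with a hypothetical stand-in macro named CALL:

// Stand-in for a single-argument test macro such as CALL_SUBTEST.
#define CALL(expr) do { expr; } while (0)

template <typename T, int Layout>
void run() {}

int main() {
  // CALL(run<float, 0>());   // error: macro "CALL" passed 2 arguments
  CALL((run<float, 0>()));    // extra parens shield the comma from the preprocessor
  return 0;
}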