diff options
author | Yi Kong <yikong@google.com> | 2022-02-25 16:41:05 +0000 |
---|---|---|
committer | Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com> | 2022-02-25 16:41:05 +0000 |
commit | bc0f5df265caa21a2120c22453655a7fcc941991 (patch) | |
tree | fb979fb4cf4f8052c8cc66b1ec9516d91fcd859b /unsupported/test/cxx11_tensor_block_io.cpp | |
parent | 8fd413e275f78a4c240f1442ce5cf77c73a20a55 (diff) | |
parent | 7cb50013986f04dce5fac87bebf319bb8db37a36 (diff) | |
download | eigen-android-13.0.0_r75.tar.gz |
Merge changes Iee153445,Iee274471 am: 79df15ea88 am: 10f298fc41 am: 7cb5001398t_frc_odp_330442040t_frc_odp_330442000t_frc_ase_330444010android-wear-13.0.0-gpl_r3android-wear-13.0.0-gpl_r2android-wear-13.0.0-gpl_r1android-vts-13.0_r8android-vts-13.0_r7android-vts-13.0_r6android-vts-13.0_r5android-vts-13.0_r4android-vts-13.0_r3android-vts-13.0_r2android-t-qpr3-beta-3-gplandroid-t-qpr3-beta-1-gplandroid-t-qpr2-beta-3-gplandroid-t-qpr2-beta-2-gplandroid-t-qpr1-beta-3-gplandroid-t-qpr1-beta-1-gplandroid-cts-13.0_r8android-cts-13.0_r7android-cts-13.0_r6android-cts-13.0_r5android-cts-13.0_r4android-cts-13.0_r3android-cts-13.0_r2android-13.0.0_r83android-13.0.0_r82android-13.0.0_r81android-13.0.0_r80android-13.0.0_r79android-13.0.0_r78android-13.0.0_r77android-13.0.0_r76android-13.0.0_r75android-13.0.0_r74android-13.0.0_r73android-13.0.0_r72android-13.0.0_r71android-13.0.0_r70android-13.0.0_r69android-13.0.0_r68android-13.0.0_r67android-13.0.0_r66android-13.0.0_r65android-13.0.0_r64android-13.0.0_r63android-13.0.0_r62android-13.0.0_r61android-13.0.0_r60android-13.0.0_r59android-13.0.0_r58android-13.0.0_r57android-13.0.0_r56android-13.0.0_r55android-13.0.0_r54android-13.0.0_r53android-13.0.0_r52android-13.0.0_r51android-13.0.0_r50android-13.0.0_r49android-13.0.0_r48android-13.0.0_r47android-13.0.0_r46android-13.0.0_r45android-13.0.0_r44android-13.0.0_r43android-13.0.0_r42android-13.0.0_r41android-13.0.0_r40android-13.0.0_r39android-13.0.0_r38android-13.0.0_r37android-13.0.0_r36android-13.0.0_r35android-13.0.0_r34android-13.0.0_r33android-13.0.0_r32android-13.0.0_r30android-13.0.0_r29android-13.0.0_r28android-13.0.0_r27android-13.0.0_r24android-13.0.0_r23android-13.0.0_r22android-13.0.0_r21android-13.0.0_r20android-13.0.0_r19android-13.0.0_r18android-13.0.0_r17android-13.0.0_r16aml_go_odp_330912000aml_go_ads_330915100aml_go_ads_330915000aml_go_ads_330913000android13-tests-releaseandroid13-tests-devandroid13-qpr3-s9-releaseandroid13-qpr3-s8-releaseandroid13-qpr3-s7-releaseandr
oid13-qpr3-s6-releaseandroid13-qpr3-s5-releaseandroid13-qpr3-s4-releaseandroid13-qpr3-s3-releaseandroid13-qpr3-s2-releaseandroid13-qpr3-s14-releaseandroid13-qpr3-s13-releaseandroid13-qpr3-s12-releaseandroid13-qpr3-s11-releaseandroid13-qpr3-s10-releaseandroid13-qpr3-s1-releaseandroid13-qpr3-releaseandroid13-qpr3-c-s8-releaseandroid13-qpr3-c-s7-releaseandroid13-qpr3-c-s6-releaseandroid13-qpr3-c-s5-releaseandroid13-qpr3-c-s4-releaseandroid13-qpr3-c-s3-releaseandroid13-qpr3-c-s2-releaseandroid13-qpr3-c-s12-releaseandroid13-qpr3-c-s11-releaseandroid13-qpr3-c-s10-releaseandroid13-qpr3-c-s1-releaseandroid13-qpr2-s9-releaseandroid13-qpr2-s8-releaseandroid13-qpr2-s7-releaseandroid13-qpr2-s6-releaseandroid13-qpr2-s5-releaseandroid13-qpr2-s3-releaseandroid13-qpr2-s2-releaseandroid13-qpr2-s12-releaseandroid13-qpr2-s11-releaseandroid13-qpr2-s10-releaseandroid13-qpr2-s1-releaseandroid13-qpr2-releaseandroid13-qpr2-b-s1-releaseandroid13-qpr1-s8-releaseandroid13-qpr1-s7-releaseandroid13-qpr1-s6-releaseandroid13-qpr1-s5-releaseandroid13-qpr1-s4-releaseandroid13-qpr1-s3-releaseandroid13-qpr1-s2-releaseandroid13-qpr1-s1-releaseandroid13-qpr1-releaseandroid13-mainline-go-adservices-releaseandroid13-frc-odp-releaseandroid13-devandroid13-d4-s2-releaseandroid13-d4-s1-releaseandroid13-d4-releaseandroid13-d3-s1-releaseandroid13-d2-releaseandroid-wear-13.0.0-gpl_r1
Original change: https://android-review.googlesource.com/c/platform/external/eigen/+/1999079
Change-Id: I4c76dc5ddc7fb0ae9fc42436f28bd8bf9de50a97
Diffstat (limited to 'unsupported/test/cxx11_tensor_block_io.cpp')
-rw-r--r-- | unsupported/test/cxx11_tensor_block_io.cpp | 445 |
1 file changed, 445 insertions, 0 deletions
diff --git a/unsupported/test/cxx11_tensor_block_io.cpp b/unsupported/test/cxx11_tensor_block_io.cpp new file mode 100644 index 000000000..52f7dde9b --- /dev/null +++ b/unsupported/test/cxx11_tensor_block_io.cpp @@ -0,0 +1,445 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// clang-format off +#include "main.h" +#include <Eigen/CXX11/Tensor> +// clang-format on + +// -------------------------------------------------------------------------- // +// A set of tests for TensorBlockIO: copying data between tensor blocks. + +template <int NumDims> +static DSizes<Index, NumDims> RandomDims(Index min, Index max) { + DSizes<Index, NumDims> dims; + for (int i = 0; i < NumDims; ++i) { + dims[i] = internal::random<Index>(min, max); + } + return DSizes<Index, NumDims>(dims); +} + +static internal::TensorBlockShapeType RandomBlockShape() { + return internal::random<bool>() + ? 
internal::TensorBlockShapeType::kUniformAllDims + : internal::TensorBlockShapeType::kSkewedInnerDims; +} + +template <int NumDims> +static size_t RandomTargetBlockSize(const DSizes<Index, NumDims>& dims) { + return internal::random<size_t>(1, dims.TotalSize()); +} + +template <int Layout, int NumDims> +static Index GetInputIndex(Index output_index, + const array<Index, NumDims>& output_to_input_dim_map, + const array<Index, NumDims>& input_strides, + const array<Index, NumDims>& output_strides) { + int input_index = 0; + if (Layout == ColMajor) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[0]]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[NumDims - 1]]; + } +} + +template <typename T, int NumDims, int Layout> +static void test_block_io_copy_data_from_source_to_target() { + using TensorBlockIO = internal::TensorBlockIO<T, Index, NumDims, Layout>; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Generate a random input Tensor. + DSizes<Index, NumDims> dims = RandomDims<NumDims>(1, 30); + Tensor<T, NumDims, Layout> input(dims); + input.setRandom(); + + // Write data to an output Tensor. + Tensor<T, NumDims, Layout> output(dims); + + // Construct a tensor block mapper. + using TensorBlockMapper = + internal::TensorBlockMapper<NumDims, Layout, Index>; + TensorBlockMapper block_mapper( + dims, {RandomBlockShape(), RandomTargetBlockSize(dims), {0, 0, 0}}); + + // We will copy data from input to output through this buffer. 
+ Tensor<T, NumDims, Layout> block(block_mapper.blockDimensions()); + + // Precompute strides for TensorBlockIO::Copy. + auto input_strides = internal::strides<Layout>(dims); + auto output_strides = internal::strides<Layout>(dims); + + const T* input_data = input.data(); + T* output_data = output.data(); + T* block_data = block.data(); + + for (int i = 0; i < block_mapper.blockCount(); ++i) { + auto desc = block_mapper.blockDescriptor(i); + + auto blk_dims = desc.dimensions(); + auto blk_strides = internal::strides<Layout>(blk_dims); + + { + // Read from input into a block buffer. + IODst dst(blk_dims, blk_strides, block_data, 0); + IOSrc src(input_strides, input_data, desc.offset()); + + TensorBlockIO::Copy(dst, src); + } + + { + // Write from block buffer to output. + IODst dst(blk_dims, output_strides, output_data, desc.offset()); + IOSrc src(blk_strides, block_data, 0); + + TensorBlockIO::Copy(dst, src); + } + } + + for (int i = 0; i < dims.TotalSize(); ++i) { + VERIFY_IS_EQUAL(input_data[i], output_data[i]); + } +} + +template <typename T, int NumDims, int Layout> +static void test_block_io_copy_using_reordered_dimensions() { + // Generate a random input Tensor. + DSizes<Index, NumDims> dims = RandomDims<NumDims>(1, 30); + Tensor<T, NumDims, Layout> input(dims); + input.setRandom(); + + // Create a random dimension re-ordering/shuffle. + std::vector<int> shuffle; + + for (int i = 0; i < NumDims; ++i) shuffle.push_back(i); + std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937(g_seed)); + + DSizes<Index, NumDims> output_tensor_dims; + DSizes<Index, NumDims> input_to_output_dim_map; + DSizes<Index, NumDims> output_to_input_dim_map; + for (Index i = 0; i < NumDims; ++i) { + output_tensor_dims[shuffle[i]] = dims[i]; + input_to_output_dim_map[i] = shuffle[i]; + output_to_input_dim_map[shuffle[i]] = i; + } + + // Write data to an output Tensor. + Tensor<T, NumDims, Layout> output(output_tensor_dims); + + // Construct a tensor block mapper. 
+ // NOTE: Tensor block mapper works with shuffled dimensions. + using TensorBlockMapper = + internal::TensorBlockMapper<NumDims, Layout, Index>; + TensorBlockMapper block_mapper(output_tensor_dims, + {RandomBlockShape(), + RandomTargetBlockSize(output_tensor_dims), + {0, 0, 0}}); + + // We will copy data from input to output through this buffer. + Tensor<T, NumDims, Layout> block(block_mapper.blockDimensions()); + + // Precompute strides for TensorBlockIO::Copy. + auto input_strides = internal::strides<Layout>(dims); + auto output_strides = internal::strides<Layout>(output_tensor_dims); + + const T* input_data = input.data(); + T* output_data = output.data(); + T* block_data = block.data(); + + for (Index i = 0; i < block_mapper.blockCount(); ++i) { + auto desc = block_mapper.blockDescriptor(i); + + const Index first_coeff_index = GetInputIndex<Layout, NumDims>( + desc.offset(), output_to_input_dim_map, input_strides, + output_strides); + + // NOTE: Block dimensions are in the same order as output dimensions. + + using TensorBlockIO = internal::TensorBlockIO<T, Index, NumDims, Layout>; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + auto blk_dims = desc.dimensions(); + auto blk_strides = internal::strides<Layout>(blk_dims); + + { + // Read from input into a block buffer. + IODst dst(blk_dims, blk_strides, block_data, 0); + IOSrc src(input_strides, input_data, first_coeff_index); + + // TODO(ezhulenev): Remove when fully switched to TensorBlock. + DSizes<int, NumDims> dim_map; + for (int j = 0; j < NumDims; ++j) + dim_map[j] = static_cast<int>(output_to_input_dim_map[j]); + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/dim_map); + } + + { + // We need to convert block dimensions from output to input order. + auto dst_dims = blk_dims; + for (int out_dim = 0; out_dim < NumDims; ++out_dim) { + dst_dims[output_to_input_dim_map[out_dim]] = blk_dims[out_dim]; + } + + // Write from block buffer to output. 
+ IODst dst(dst_dims, input_strides, output_data, first_coeff_index); + IOSrc src(blk_strides, block_data, 0); + + // TODO(ezhulenev): Remove when fully switched to TensorBlock. + DSizes<int, NumDims> dim_map; + for (int j = 0; j < NumDims; ++j) + dim_map[j] = static_cast<int>(input_to_output_dim_map[j]); + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/dim_map); + } + } + + for (Index i = 0; i < dims.TotalSize(); ++i) { + VERIFY_IS_EQUAL(input_data[i], output_data[i]); + } +} + +// This is the special case for reading data with reordering, when dimensions +// before/after reordering are the same. Squeezing reads along inner dimensions +// in this case is illegal, because we reorder innermost dimension. +template <int Layout> +static void test_block_io_copy_using_reordered_dimensions_do_not_squeeze() { + DSizes<Index, 3> tensor_dims(7, 9, 7); + DSizes<Index, 3> block_dims = tensor_dims; + + DSizes<int, 3> block_to_tensor_dim; + block_to_tensor_dim[0] = 2; + block_to_tensor_dim[1] = 1; + block_to_tensor_dim[2] = 0; + + auto tensor_strides = internal::strides<Layout>(tensor_dims); + auto block_strides = internal::strides<Layout>(block_dims); + + Tensor<float, 3, Layout> block(block_dims); + Tensor<float, 3, Layout> tensor(tensor_dims); + tensor.setRandom(); + + float* tensor_data = tensor.data(); + float* block_data = block.data(); + + using TensorBlockIO = internal::TensorBlockIO<float, Index, 3, Layout>; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Read from a tensor into a block. 
+ IODst dst(block_dims, block_strides, block_data, 0); + IOSrc src(tensor_strides, tensor_data, 0); + + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/block_to_tensor_dim); + + TensorMap<Tensor<float, 3, Layout> > block_tensor(block_data, block_dims); + TensorMap<Tensor<float, 3, Layout> > tensor_tensor(tensor_data, tensor_dims); + + for (Index d0 = 0; d0 < tensor_dims[0]; ++d0) { + for (Index d1 = 0; d1 < tensor_dims[1]; ++d1) { + for (Index d2 = 0; d2 < tensor_dims[2]; ++d2) { + float block_value = block_tensor(d2, d1, d0); + float tensor_value = tensor_tensor(d0, d1, d2); + VERIFY_IS_EQUAL(block_value, tensor_value); + } + } + } +} + +// This is the special case for reading data with reordering, when dimensions +// before/after reordering are the same. Squeezing reads in this case is allowed +// because we reorder outer dimensions. +template <int Layout> +static void test_block_io_copy_using_reordered_dimensions_squeeze() { + DSizes<Index, 4> tensor_dims(7, 5, 9, 9); + DSizes<Index, 4> block_dims = tensor_dims; + + DSizes<int, 4> block_to_tensor_dim; + block_to_tensor_dim[0] = 0; + block_to_tensor_dim[1] = 1; + block_to_tensor_dim[2] = 3; + block_to_tensor_dim[3] = 2; + + auto tensor_strides = internal::strides<Layout>(tensor_dims); + auto block_strides = internal::strides<Layout>(block_dims); + + Tensor<float, 4, Layout> block(block_dims); + Tensor<float, 4, Layout> tensor(tensor_dims); + tensor.setRandom(); + + float* tensor_data = tensor.data(); + float* block_data = block.data(); + + using TensorBlockIO = internal::TensorBlockIO<float, Index, 4, Layout>; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Read from a tensor into a block. 
+ IODst dst(block_dims, block_strides, block_data, 0); + IOSrc src(tensor_strides, tensor_data, 0); + + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/block_to_tensor_dim); + + TensorMap<Tensor<float, 4, Layout> > block_tensor(block_data, block_dims); + TensorMap<Tensor<float, 4, Layout> > tensor_tensor(tensor_data, tensor_dims); + + for (Index d0 = 0; d0 < tensor_dims[0]; ++d0) { + for (Index d1 = 0; d1 < tensor_dims[1]; ++d1) { + for (Index d2 = 0; d2 < tensor_dims[2]; ++d2) { + for (Index d3 = 0; d3 < tensor_dims[3]; ++d3) { + float block_value = block_tensor(d0, d1, d3, d2); + float tensor_value = tensor_tensor(d0, d1, d2, d3); + VERIFY_IS_EQUAL(block_value, tensor_value); + } + } + } + } +} + +template <int Layout> +static void test_block_io_zero_stride() { + DSizes<Index, 5> rnd_dims = RandomDims<5>(1, 30); + + DSizes<Index, 5> input_tensor_dims = rnd_dims; + input_tensor_dims[0] = 1; + input_tensor_dims[2] = 1; + input_tensor_dims[4] = 1; + + Tensor<float, 5, Layout> input(input_tensor_dims); + input.setRandom(); + + DSizes<Index, 5> output_tensor_dims = rnd_dims; + + auto input_tensor_strides = internal::strides<Layout>(input_tensor_dims); + auto output_tensor_strides = internal::strides<Layout>(output_tensor_dims); + + auto input_tensor_strides_with_zeros = input_tensor_strides; + input_tensor_strides_with_zeros[0] = 0; + input_tensor_strides_with_zeros[2] = 0; + input_tensor_strides_with_zeros[4] = 0; + + Tensor<float, 5, Layout> output(output_tensor_dims); + output.setRandom(); + + using TensorBlockIO = internal::TensorBlockIO<float, Index, 5, Layout>; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Write data from input to output with broadcasting in dims [0, 2, 4]. 
+ IODst dst(output_tensor_dims, output_tensor_strides, output.data(), 0); + IOSrc src(input_tensor_strides_with_zeros, input.data(), 0); + TensorBlockIO::Copy(dst, src); + + for (int i = 0; i < output_tensor_dims[0]; ++i) { + for (int j = 0; j < output_tensor_dims[1]; ++j) { + for (int k = 0; k < output_tensor_dims[2]; ++k) { + for (int l = 0; l < output_tensor_dims[3]; ++l) { + for (int m = 0; m < output_tensor_dims[4]; ++m) { + float input_value = input(0, j, 0, l, 0); + float output_value = output(i, j, k, l, m); + VERIFY_IS_EQUAL(input_value, output_value); + } + } + } + } + } +} + +template <int Layout> +static void test_block_io_squeeze_ones() { + using TensorBlockIO = internal::TensorBlockIO<float, Index, 5, Layout>; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Total size > 1. + { + DSizes<Index, 5> block_sizes(1, 2, 1, 2, 1); + auto strides = internal::strides<Layout>(block_sizes); + + // Create a random input tensor. + Tensor<float, 5> input(block_sizes); + input.setRandom(); + + Tensor<float, 5> output(block_sizes); + + IODst dst(block_sizes, strides, output.data(), 0); + IOSrc src(strides, input.data()); + TensorBlockIO::Copy(dst, src); + + for (Index i = 0; i < block_sizes.TotalSize(); ++i) { + VERIFY_IS_EQUAL(output.data()[i], input.data()[i]); + } + } + + // Total size == 1. + { + DSizes<Index, 5> block_sizes(1, 1, 1, 1, 1); + auto strides = internal::strides<Layout>(block_sizes); + + // Create a random input tensor. 
+ Tensor<float, 5> input(block_sizes); + input.setRandom(); + + Tensor<float, 5> output(block_sizes); + + IODst dst(block_sizes, strides, output.data(), 0); + IOSrc src(strides, input.data()); + TensorBlockIO::Copy(dst, src); + + for (Index i = 0; i < block_sizes.TotalSize(); ++i) { + VERIFY_IS_EQUAL(output.data()[i], input.data()[i]); + } + } +} + +#define CALL_SUBTESTS(NAME) \ + CALL_SUBTEST((NAME<float, 1, RowMajor>())); \ + CALL_SUBTEST((NAME<float, 2, RowMajor>())); \ + CALL_SUBTEST((NAME<float, 4, RowMajor>())); \ + CALL_SUBTEST((NAME<float, 5, RowMajor>())); \ + CALL_SUBTEST((NAME<float, 1, ColMajor>())); \ + CALL_SUBTEST((NAME<float, 2, ColMajor>())); \ + CALL_SUBTEST((NAME<float, 4, ColMajor>())); \ + CALL_SUBTEST((NAME<float, 5, ColMajor>())); \ + CALL_SUBTEST((NAME<bool, 1, RowMajor>())); \ + CALL_SUBTEST((NAME<bool, 2, RowMajor>())); \ + CALL_SUBTEST((NAME<bool, 4, RowMajor>())); \ + CALL_SUBTEST((NAME<bool, 5, RowMajor>())); \ + CALL_SUBTEST((NAME<bool, 1, ColMajor>())); \ + CALL_SUBTEST((NAME<bool, 2, ColMajor>())); \ + CALL_SUBTEST((NAME<bool, 4, ColMajor>())); \ + CALL_SUBTEST((NAME<bool, 5, ColMajor>())) + +EIGEN_DECLARE_TEST(cxx11_tensor_block_io) { + // clang-format off + CALL_SUBTESTS(test_block_io_copy_data_from_source_to_target); + CALL_SUBTESTS(test_block_io_copy_using_reordered_dimensions); + + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_do_not_squeeze<RowMajor>()); + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_do_not_squeeze<ColMajor>()); + + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_squeeze<RowMajor>()); + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_squeeze<ColMajor>()); + + CALL_SUBTEST(test_block_io_zero_stride<RowMajor>()); + CALL_SUBTEST(test_block_io_zero_stride<ColMajor>()); + + CALL_SUBTEST(test_block_io_squeeze_ones<RowMajor>()); + CALL_SUBTEST(test_block_io_squeeze_ones<ColMajor>()); + // clang-format on +} |