diff options
Diffstat (limited to 'test')
164 files changed, 15599 insertions, 2249 deletions
diff --git a/test/AnnoyingScalar.h b/test/AnnoyingScalar.h new file mode 100644 index 000000000..7ace083c5 --- /dev/null +++ b/test/AnnoyingScalar.h @@ -0,0 +1,165 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2018 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TEST_ANNOYING_SCALAR_H +#define EIGEN_TEST_ANNOYING_SCALAR_H + +#include <ostream> + +#if EIGEN_COMP_GNUC +#pragma GCC diagnostic ignored "-Wshadow" +#endif + +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW +struct my_exception +{ + my_exception() {} + ~my_exception() {} +}; +#endif + +// An AnnoyingScalar is a pseudo scalar type that: +// - can randomly throw an exception in operator + +// - randomly allocate on the heap or initialize a reference to itself making it non trivially copyable, nor movable, nor relocatable. 
+ +class AnnoyingScalar +{ + public: + AnnoyingScalar() { init(); *v = 0; } + AnnoyingScalar(long double _v) { init(); *v = _v; } + AnnoyingScalar(double _v) { init(); *v = _v; } + AnnoyingScalar(float _v) { init(); *v = _v; } + AnnoyingScalar(int _v) { init(); *v = _v; } + AnnoyingScalar(long _v) { init(); *v = _v; } + #if EIGEN_HAS_CXX11 + AnnoyingScalar(long long _v) { init(); *v = _v; } + #endif + AnnoyingScalar(const AnnoyingScalar& other) { init(); *v = *(other.v); } + ~AnnoyingScalar() { + if(v!=&data) + delete v; + instances--; + } + + void init() { + if(internal::random<bool>()) + v = new float; + else + v = &data; + instances++; + } + + AnnoyingScalar operator+(const AnnoyingScalar& other) const + { + #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW + countdown--; + if(countdown<=0 && !dont_throw) + throw my_exception(); + #endif + return AnnoyingScalar(*v+*other.v); + } + + AnnoyingScalar operator-() const + { return AnnoyingScalar(-*v); } + + AnnoyingScalar operator-(const AnnoyingScalar& other) const + { return AnnoyingScalar(*v-*other.v); } + + AnnoyingScalar operator*(const AnnoyingScalar& other) const + { return AnnoyingScalar((*v)*(*other.v)); } + + AnnoyingScalar operator/(const AnnoyingScalar& other) const + { return AnnoyingScalar((*v)/(*other.v)); } + + AnnoyingScalar& operator+=(const AnnoyingScalar& other) { *v += *other.v; return *this; } + AnnoyingScalar& operator-=(const AnnoyingScalar& other) { *v -= *other.v; return *this; } + AnnoyingScalar& operator*=(const AnnoyingScalar& other) { *v *= *other.v; return *this; } + AnnoyingScalar& operator/=(const AnnoyingScalar& other) { *v /= *other.v; return *this; } + AnnoyingScalar& operator= (const AnnoyingScalar& other) { *v = *other.v; return *this; } + + bool operator==(const AnnoyingScalar& other) const { return *v == *other.v; } + bool operator!=(const AnnoyingScalar& other) const { return *v != *other.v; } + bool operator<=(const AnnoyingScalar& other) const { return *v <= *other.v; } + bool 
operator< (const AnnoyingScalar& other) const { return *v < *other.v; } + bool operator>=(const AnnoyingScalar& other) const { return *v >= *other.v; } + bool operator> (const AnnoyingScalar& other) const { return *v > *other.v; } + + float* v; + float data; + static int instances; +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW + static int countdown; + static bool dont_throw; +#endif +}; + +AnnoyingScalar real(const AnnoyingScalar &x) { return x; } +AnnoyingScalar imag(const AnnoyingScalar & ) { return 0; } +AnnoyingScalar conj(const AnnoyingScalar &x) { return x; } +AnnoyingScalar sqrt(const AnnoyingScalar &x) { return std::sqrt(*x.v); } +AnnoyingScalar abs (const AnnoyingScalar &x) { return std::abs(*x.v); } +AnnoyingScalar cos (const AnnoyingScalar &x) { return std::cos(*x.v); } +AnnoyingScalar sin (const AnnoyingScalar &x) { return std::sin(*x.v); } +AnnoyingScalar acos(const AnnoyingScalar &x) { return std::acos(*x.v); } +AnnoyingScalar atan2(const AnnoyingScalar &y,const AnnoyingScalar &x) { return std::atan2(*y.v,*x.v); } + +std::ostream& operator<<(std::ostream& stream,const AnnoyingScalar& x) { + stream << (*(x.v)); + return stream; +} + +int AnnoyingScalar::instances = 0; + +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW +int AnnoyingScalar::countdown = 0; +bool AnnoyingScalar::dont_throw = false; +#endif + +namespace Eigen { +template<> +struct NumTraits<AnnoyingScalar> : NumTraits<float> +{ + enum { + RequireInitialization = 1, + }; + typedef AnnoyingScalar Real; + typedef AnnoyingScalar Nested; + typedef AnnoyingScalar Literal; + typedef AnnoyingScalar NonInteger; +}; + +template<> inline AnnoyingScalar test_precision<AnnoyingScalar>() { return test_precision<float>(); } + +namespace numext { +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isfinite)(const AnnoyingScalar& x) { + return (numext::isfinite)(*x.v); +} +} + +namespace internal { + template<> EIGEN_STRONG_INLINE double cast(const AnnoyingScalar& x) { return double(*x.v); } + 
template<> EIGEN_STRONG_INLINE float cast(const AnnoyingScalar& x) { return *x.v; } +} +} // namespace Eigen + +AnnoyingScalar get_test_precision(const AnnoyingScalar&) +{ return Eigen::test_precision<AnnoyingScalar>(); } + +AnnoyingScalar test_relative_error(const AnnoyingScalar &a, const AnnoyingScalar &b) +{ return test_relative_error(*a.v, *b.v); } + +inline bool test_isApprox(const AnnoyingScalar &a, const AnnoyingScalar &b) +{ return internal::isApprox(*a.v, *b.v, test_precision<float>()); } + +inline bool test_isMuchSmallerThan(const AnnoyingScalar &a, const AnnoyingScalar &b) +{ return test_isMuchSmallerThan(*a.v, *b.v); } + +#endif // EIGEN_TEST_ANNOYING_SCALAR_H diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 0747aa6cb..5136f82aa 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,35 +1,30 @@ -# generate split test header file only if it does not yet exist -# in order to prevent a rebuild everytime cmake is configured -if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h) - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "") - foreach(i RANGE 1 999) - file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h - "#ifdef EIGEN_TEST_PART_${i}\n" - "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n" - "#else\n" - "#define CALL_SUBTEST_${i}(FUNC)\n" - "#endif\n\n" - ) - endforeach() +# The file split_test_helper.h was generated at first run, +# it is now included in test/ +if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h) + file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h) endif() # check if we have a Fortran compiler -include("../cmake/language_support.cmake") - -workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) - -if(EIGEN_Fortran_COMPILER_WORKS) - enable_language(Fortran OPTIONAL) - if(NOT CMAKE_Fortran_COMPILER) - set(EIGEN_Fortran_COMPILER_WORKS OFF) - endif() -endif() - -if(NOT EIGEN_Fortran_COMPILER_WORKS) +include(CheckLanguage) +check_language(Fortran) 
+if(CMAKE_Fortran_COMPILER) + enable_language(Fortran) + set(EIGEN_Fortran_COMPILER_WORKS ON) +else() + set(EIGEN_Fortran_COMPILER_WORKS OFF) # search for a default Lapack library to complete Eigen's one find_package(LAPACK QUIET) endif() +# TODO do the same for EXTERNAL_LAPACK +option(EIGEN_TEST_EXTERNAL_BLAS "Use external BLAS library for testsuite" OFF) +if(EIGEN_TEST_EXTERNAL_BLAS) + find_package(BLAS REQUIRED) + message(STATUS "BLAS_COMPILER_FLAGS: ${BLAS_COMPILER_FLAGS}") + add_definitions("-DEIGEN_USE_BLAS") # is adding ${BLAS_COMPILER_FLAGS} necessary? + list(APPEND EXTERNAL_LIBS "${BLAS_LIBRARIES}") +endif() + # configure blas/lapack (use Eigen's ones) set(EIGEN_BLAS_LIBRARIES eigen_blas) set(EIGEN_LAPACK_LIBRARIES eigen_lapack) @@ -39,37 +34,48 @@ if(EIGEN_TEST_MATRIX_DIR) if(NOT WIN32) message(STATUS "Test realworld sparse matrices: ${EIGEN_TEST_MATRIX_DIR}") add_definitions( -DTEST_REAL_CASES="${EIGEN_TEST_MATRIX_DIR}" ) - else(NOT WIN32) + else() message(STATUS "REAL CASES CAN NOT BE CURRENTLY TESTED ON WIN32") - endif(NOT WIN32) -endif(EIGEN_TEST_MATRIX_DIR) + endif() +endif() set(SPARSE_LIBS " ") -find_package(Cholmod) +find_package(CHOLMOD) if(CHOLMOD_FOUND) add_definitions("-DEIGEN_CHOLMOD_SUPPORT") include_directories(${CHOLMOD_INCLUDES}) set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${EIGEN_BLAS_LIBRARIES} ${EIGEN_LAPACK_LIBRARIES}) set(CHOLMOD_ALL_LIBS ${CHOLMOD_LIBRARIES} ${EIGEN_BLAS_LIBRARIES} ${EIGEN_LAPACK_LIBRARIES}) - ei_add_property(EIGEN_TESTED_BACKENDS "Cholmod, ") + ei_add_property(EIGEN_TESTED_BACKENDS "CHOLMOD, ") else() - ei_add_property(EIGEN_MISSING_BACKENDS "Cholmod, ") + ei_add_property(EIGEN_MISSING_BACKENDS "CHOLMOD, ") endif() -find_package(Umfpack) +find_package(UMFPACK) if(UMFPACK_FOUND) add_definitions("-DEIGEN_UMFPACK_SUPPORT") include_directories(${UMFPACK_INCLUDES}) set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${EIGEN_BLAS_LIBRARIES}) set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${EIGEN_BLAS_LIBRARIES}) - 
ei_add_property(EIGEN_TESTED_BACKENDS "UmfPack, ") + ei_add_property(EIGEN_TESTED_BACKENDS "UMFPACK, ") else() - ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ") + ei_add_property(EIGEN_MISSING_BACKENDS "UMFPACK, ") +endif() + +find_package(KLU) +if(KLU_FOUND) + add_definitions("-DEIGEN_KLU_SUPPORT") + include_directories(${KLU_INCLUDES}) + set(SPARSE_LIBS ${SPARSE_LIBS} ${KLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES}) + set(KLU_ALL_LIBS ${KLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES}) + ei_add_property(EIGEN_TESTED_BACKENDS "KLU, ") +else() + ei_add_property(EIGEN_MISSING_BACKENDS "KLU, ") endif() find_package(SuperLU 4.0) -if(SUPERLU_FOUND) +if(SuperLU_FOUND) add_definitions("-DEIGEN_SUPERLU_SUPPORT") include_directories(${SUPERLU_INCLUDES}) set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${EIGEN_BLAS_LIBRARIES}) @@ -80,7 +86,7 @@ else() endif() -find_package(PASTIX QUIET COMPONENTS METIS SCOTCH) +find_package(PASTIX QUIET COMPONENTS METIS SEQ) # check that the PASTIX found is a version without MPI find_path(PASTIX_pastix_nompi.h_INCLUDE_DIRS NAMES pastix_nompi.h @@ -99,9 +105,9 @@ if(PASTIX_FOUND AND PASTIX_pastix_nompi.h_INCLUDE_DIRS) elseif(METIS_FOUND) include_directories(${METIS_INCLUDE_DIRS}) set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES}) - else(SCOTCH_FOUND) + else() ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ") - endif(SCOTCH_FOUND) + endif() set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES_DEP} ${ORDERING_LIBRARIES}) set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES_DEP}) ei_add_property(EIGEN_TESTED_BACKENDS "PaStiX, ") @@ -137,11 +143,11 @@ if(NOT EIGEN_TEST_NOQT) else() ei_add_property(EIGEN_MISSING_BACKENDS "Qt4 support, ") endif() -endif(NOT EIGEN_TEST_NOQT) +endif() if(TEST_LIB) add_definitions("-DEIGEN_EXTERN_INSTANTIATIONS=1") -endif(TEST_LIB) +endif() set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Official") add_custom_target(BuildOfficial) @@ -153,23 +159,27 @@ ei_add_test(sizeof) ei_add_test(dynalloc) ei_add_test(nomalloc) 
ei_add_test(first_aligned) +ei_add_test(type_alias) ei_add_test(nullary) ei_add_test(mixingtypes) +ei_add_test(io) ei_add_test(packetmath "-DEIGEN_FAST_MATH=1") -ei_add_test(unalignedassert) ei_add_test(vectorization_logic) ei_add_test(basicstuff) ei_add_test(constructor) ei_add_test(linearstructure) ei_add_test(integer_types) ei_add_test(unalignedcount) -if(NOT EIGEN_TEST_NO_EXCEPTIONS) +if(NOT EIGEN_TEST_NO_EXCEPTIONS AND NOT EIGEN_TEST_OPENMP) ei_add_test(exceptions) endif() ei_add_test(redux) ei_add_test(visitor) ei_add_test(block) ei_add_test(corners) +ei_add_test(symbolic_index) +ei_add_test(indexed_view) +ei_add_test(reshape) ei_add_test(swap) ei_add_test(resize) ei_add_test(conservative_resize) @@ -185,7 +195,7 @@ ei_add_test(smallvectors) ei_add_test(mapped_matrix) ei_add_test(mapstride) ei_add_test(mapstaticmethods) -ei_add_test(array) +ei_add_test(array_cwise) ei_add_test(array_for_matrix) ei_add_test(array_replicate) ei_add_test(array_reverse) @@ -254,6 +264,7 @@ ei_add_test(sparselu) ei_add_test(sparseqr) ei_add_test(umeyama) ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}") +ei_add_test(nestbyvalue) ei_add_test(zerosized) ei_add_test(dontalign) ei_add_test(evaluators) @@ -269,7 +280,15 @@ ei_add_test(ctorleak) ei_add_test(mpl2only) ei_add_test(inplace_decomposition) ei_add_test(half_float) +ei_add_test(bfloat16_float) ei_add_test(array_of_string) +ei_add_test(num_dimensions) +ei_add_test(stl_iterators) +ei_add_test(blasutil) +if(EIGEN_TEST_CXX11) + ei_add_test(initializer_list_construction) + ei_add_test(diagonal_matrix_variadic_ctor) +endif() add_executable(bug1213 bug1213.cpp bug1213_main.cpp) @@ -289,12 +308,16 @@ ei_add_test(fastmath " ${EIGEN_FASTMATH_FLAGS} ") if(QT4_FOUND) ei_add_test(qtvector "" "${QT_QTCORE_LIBRARY}") -endif(QT4_FOUND) +endif() if(UMFPACK_FOUND) ei_add_test(umfpack_support "" "${UMFPACK_ALL_LIBS}") endif() +if(KLU_FOUND OR SuiteSparse_FOUND) + ei_add_test(klu_support "" "${KLU_ALL_LIBS}") +endif() + if(SUPERLU_FOUND) 
ei_add_test(superlu_support "" "${SUPERLU_ALL_LIBS}") endif() @@ -330,6 +353,9 @@ if(CMAKE_COMPILER_IS_GNUCXX AND NOT CXX_IS_QCC) ei_add_property(EIGEN_TESTING_SUMMARY "CXX_VERSION: ${EIGEN_CXX_VERSION_STRING}\n") endif() ei_add_property(EIGEN_TESTING_SUMMARY "CXX_FLAGS: ${CMAKE_CXX_FLAGS}\n") +if (EIGEN_TEST_CUSTOM_CXX_FLAGS) + ei_add_property(EIGEN_TESTING_SUMMARY "Custom CXX flags: ${EIGEN_TEST_CUSTOM_CXX_FLAGS}\n") +endif() ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags: ${SPARSE_LIBS}\n") option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF) @@ -339,7 +365,7 @@ if(EIGEN_TEST_EIGEN2) endif() # boost MP unit test -find_package(Boost) +find_package(Boost 1.53.0) if(Boost_FOUND) include_directories(${Boost_INCLUDE_DIRS}) ei_add_test(boostmultiprec "" "${Boost_LIBRARIES}") @@ -363,28 +389,77 @@ find_package(CUDA 5.0) if(CUDA_FOUND) set(CUDA_PROPAGATE_HOST_FLAGS OFF) + + set(EIGEN_CUDA_RELAXED_CONSTEXPR "--expt-relaxed-constexpr") + if (${CUDA_VERSION} STREQUAL "7.0") + set(EIGEN_CUDA_RELAXED_CONSTEXPR "--relaxed-constexpr") + endif() + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") set(CUDA_NVCC_FLAGS "-ccbin ${CMAKE_C_COMPILER}" CACHE STRING "nvcc flags" FORCE) endif() if(EIGEN_TEST_CUDA_CLANG) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 --cuda-gpu-arch=sm_30") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + string(APPEND CMAKE_CXX_FLAGS " --cuda-path=${CUDA_TOOLKIT_ROOT_DIR}") + foreach(GPU IN LISTS EIGEN_CUDA_COMPUTE_ARCH) + string(APPEND CMAKE_CXX_FLAGS " --cuda-gpu-arch=sm_${GPU}") + endforeach() + else() + foreach(GPU IN LISTS EIGEN_CUDA_COMPUTE_ARCH) + string(APPEND CUDA_NVCC_FLAGS " -gencode arch=compute_${GPU},code=sm_${GPU}") + endforeach() endif() - cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR}) + string(APPEND CUDA_NVCC_FLAGS " ${EIGEN_CUDA_RELAXED_CONSTEXPR}") set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu") - ei_add_test(cuda_basic) + ei_add_test(gpu_basic) 
unset(EIGEN_ADD_TEST_FILENAME_EXTENSION) -endif(CUDA_FOUND) +endif() + +endif() -endif(EIGEN_TEST_CUDA) +# HIP unit tests +option(EIGEN_TEST_HIP "Add HIP support." OFF) +if (EIGEN_TEST_HIP) -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests) -add_test(NAME failtests WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/failtests COMMAND ${CMAKE_COMMAND} ${Eigen_SOURCE_DIR} -G "${CMAKE_GENERATOR}" -DEIGEN_FAILTEST=ON) + set(HIP_PATH "/opt/rocm/hip" CACHE STRING "Path to the HIP installation.") -option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF) -IF(EIGEN_TEST_BUILD_DOCUMENTATION) + if (EXISTS ${HIP_PATH}) + + list(APPEND CMAKE_MODULE_PATH ${HIP_PATH}/cmake) + + find_package(HIP REQUIRED) + if (HIP_FOUND) + + execute_process(COMMAND ${HIP_PATH}/bin/hipconfig --platform OUTPUT_VARIABLE HIP_PLATFORM) + + if ((${HIP_PLATFORM} STREQUAL "hcc") OR (${HIP_PLATFORM} STREQUAL "amd")) + + include_directories(${HIP_PATH}/include) + + set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu") + ei_add_test(gpu_basic) + unset(EIGEN_ADD_TEST_FILENAME_EXTENSION) + + elseif ((${HIP_PLATFORM} STREQUAL "nvcc") OR (${HIP_PLATFORM} STREQUAL "nvidia")) + message(FATAL_ERROR "HIP_PLATFORM = nvcc is not supported within Eigen") + else () + message(FATAL_ERROR "Unknown HIP_PLATFORM = ${HIP_PLATFORM}") + endif() + endif() + else () + message(FATAL_ERROR "EIGEN_TEST_HIP is ON, but the specified HIP_PATH (${HIP_PATH}) does not exist") + endif() +endif() + +cmake_dependent_option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF "EIGEN_BUILD_DOC" OFF) +if(EIGEN_TEST_BUILD_DOCUMENTATION) add_dependencies(buildtests doc) -ENDIF() +endif() + +# Register all smoke tests +include("EigenSmokeTestList") +ei_add_smoke_tests("${ei_smoke_test_list}") diff --git a/test/MovableScalar.h b/test/MovableScalar.h new file mode 100644 index 000000000..6a90d037a --- /dev/null +++ b/test/MovableScalar.h @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ 
template library +// for linear algebra. +// +// Copyright (C) 2020 Sebastien Boisvert <seb@boisvert.info> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MISC_MOVABLE_SCALAR_H +#define EIGEN_MISC_MOVABLE_SCALAR_H + +#include <vector> + +namespace Eigen +{ +template <typename Scalar, typename Base = std::vector<Scalar>> +struct MovableScalar : public Base +{ + MovableScalar() = default; + ~MovableScalar() = default; + MovableScalar(const MovableScalar&) = default; + MovableScalar(MovableScalar&& other) = default; + MovableScalar& operator=(const MovableScalar&) = default; + MovableScalar& operator=(MovableScalar&& other) = default; + MovableScalar(Scalar scalar) : Base(100, scalar) {} + + operator Scalar() const { return this->size() > 0 ? this->back() : Scalar(); } +}; + +template<> struct NumTraits<MovableScalar<float>> : GenericNumTraits<float> {}; +} + +#endif + diff --git a/test/SafeScalar.h b/test/SafeScalar.h new file mode 100644 index 000000000..c5cb75717 --- /dev/null +++ b/test/SafeScalar.h @@ -0,0 +1,30 @@ + +// A Scalar that asserts for uninitialized access. 
+template<typename T> +class SafeScalar { + public: + SafeScalar() : initialized_(false) {} + SafeScalar(const SafeScalar& other) { + *this = other; + } + SafeScalar& operator=(const SafeScalar& other) { + val_ = T(other); + initialized_ = true; + return *this; + } + + SafeScalar(T val) : val_(val), initialized_(true) {} + SafeScalar& operator=(T val) { + val_ = val; + initialized_ = true; + } + + operator T() const { + VERIFY(initialized_ && "Uninitialized access."); + return val_; + } + + private: + T val_; + bool initialized_; +}; diff --git a/test/adjoint.cpp b/test/adjoint.cpp index bdea51c10..4c4f98bb9 100644 --- a/test/adjoint.cpp +++ b/test/adjoint.cpp @@ -70,7 +70,6 @@ template<typename MatrixType> void adjoint(const MatrixType& m) Transpose.h Conjugate.h Dot.h */ using std::abs; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; @@ -144,9 +143,55 @@ template<typename MatrixType> void adjoint(const MatrixType& m) RealVectorType rv1 = RealVectorType::Random(rows); VERIFY_IS_APPROX(v1.dot(rv1.template cast<Scalar>()), v1.dot(rv1)); VERIFY_IS_APPROX(rv1.template cast<Scalar>().dot(v1), rv1.dot(v1)); + + VERIFY( is_same_type(m1,m1.template conjugateIf<false>()) ); + VERIFY( is_same_type(m1.conjugate(),m1.template conjugateIf<true>()) ); +} + +template<int> +void adjoint_extra() +{ + MatrixXcf a(10,10), b(10,10); + VERIFY_RAISES_ASSERT(a = a.transpose()); + VERIFY_RAISES_ASSERT(a = a.transpose() + b); + VERIFY_RAISES_ASSERT(a = b + a.transpose()); + VERIFY_RAISES_ASSERT(a = a.conjugate().transpose()); + VERIFY_RAISES_ASSERT(a = a.adjoint()); + VERIFY_RAISES_ASSERT(a = a.adjoint() + b); + VERIFY_RAISES_ASSERT(a = b + a.adjoint()); + + // no assertion should be triggered for these cases: + a.transpose() = a.transpose(); + a.transpose() += a.transpose(); + a.transpose() += a.transpose() + b; + a.transpose() 
= a.adjoint(); + a.transpose() += a.adjoint(); + a.transpose() += a.adjoint() + b; + + // regression tests for check_for_aliasing + MatrixXd c(10,10); + c = 1.0 * MatrixXd::Ones(10,10) + c; + c = MatrixXd::Ones(10,10) * 1.0 + c; + c = c + MatrixXd::Ones(10,10) .cwiseProduct( MatrixXd::Zero(10,10) ); + c = MatrixXd::Ones(10,10) * MatrixXd::Zero(10,10); + + // regression for bug 1646 + for (int j = 0; j < 10; ++j) { + c.col(j).head(j) = c.row(j).head(j); + } + + for (int j = 0; j < 10; ++j) { + c.col(j) = c.row(j); + } + + a.conservativeResize(1,1); + a = a.transpose(); + + a.conservativeResize(0,0); + a = a.transpose(); } -void test_adjoint() +EIGEN_DECLARE_TEST(adjoint) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( adjoint(Matrix<float, 1, 1>()) ); @@ -169,32 +214,6 @@ void test_adjoint() // test a large static matrix only once CALL_SUBTEST_7( adjoint(Matrix<float, 100, 100>()) ); -#ifdef EIGEN_TEST_PART_13 - { - MatrixXcf a(10,10), b(10,10); - VERIFY_RAISES_ASSERT(a = a.transpose()); - VERIFY_RAISES_ASSERT(a = a.transpose() + b); - VERIFY_RAISES_ASSERT(a = b + a.transpose()); - VERIFY_RAISES_ASSERT(a = a.conjugate().transpose()); - VERIFY_RAISES_ASSERT(a = a.adjoint()); - VERIFY_RAISES_ASSERT(a = a.adjoint() + b); - VERIFY_RAISES_ASSERT(a = b + a.adjoint()); - - // no assertion should be triggered for these cases: - a.transpose() = a.transpose(); - a.transpose() += a.transpose(); - a.transpose() += a.transpose() + b; - a.transpose() = a.adjoint(); - a.transpose() += a.adjoint(); - a.transpose() += a.adjoint() + b; - - // regression tests for check_for_aliasing - MatrixXd c(10,10); - c = 1.0 * MatrixXd::Ones(10,10) + c; - c = MatrixXd::Ones(10,10) * 1.0 + c; - c = c + MatrixXd::Ones(10,10) .cwiseProduct( MatrixXd::Zero(10,10) ); - c = MatrixXd::Ones(10,10) * MatrixXd::Zero(10,10); - } -#endif + CALL_SUBTEST_13( adjoint_extra<0>() ); } diff --git a/test/array.cpp b/test/array_cwise.cpp index 15c3266a9..0cc438b39 100644 --- a/test/array.cpp +++ 
b/test/array_cwise.cpp @@ -9,16 +9,88 @@ #include "main.h" + +// Test the corner cases of pow(x, y) for real types. +template<typename Scalar> +void pow_test() { + const Scalar zero = Scalar(0); + const Scalar eps = Eigen::NumTraits<Scalar>::epsilon(); + const Scalar one = Scalar(1); + const Scalar two = Scalar(2); + const Scalar three = Scalar(3); + const Scalar sqrt_half = Scalar(std::sqrt(0.5)); + const Scalar sqrt2 = Scalar(std::sqrt(2)); + const Scalar inf = Eigen::NumTraits<Scalar>::infinity(); + const Scalar nan = Eigen::NumTraits<Scalar>::quiet_NaN(); + const Scalar denorm_min = std::numeric_limits<Scalar>::denorm_min(); + const Scalar min = (std::numeric_limits<Scalar>::min)(); + const Scalar max = (std::numeric_limits<Scalar>::max)(); + const Scalar max_exp = (static_cast<Scalar>(int(Eigen::NumTraits<Scalar>::max_exponent())) * Scalar(EIGEN_LN2)) / eps; + + const static Scalar abs_vals[] = {zero, + denorm_min, + min, + eps, + sqrt_half, + one, + sqrt2, + two, + three, + max_exp, + max, + inf, + nan}; + const int abs_cases = 13; + const int num_cases = 2*abs_cases * 2*abs_cases; + // Repeat the same value to make sure we hit the vectorized path. + const int num_repeats = 32; + Array<Scalar, Dynamic, Dynamic> x(num_repeats, num_cases); + Array<Scalar, Dynamic, Dynamic> y(num_repeats, num_cases); + int count = 0; + for (int i = 0; i < abs_cases; ++i) { + const Scalar abs_x = abs_vals[i]; + for (int sign_x = 0; sign_x < 2; ++sign_x) { + Scalar x_case = sign_x == 0 ? -abs_x : abs_x; + for (int j = 0; j < abs_cases; ++j) { + const Scalar abs_y = abs_vals[j]; + for (int sign_y = 0; sign_y < 2; ++sign_y) { + Scalar y_case = sign_y == 0 ? 
-abs_y : abs_y; + for (int repeat = 0; repeat < num_repeats; ++repeat) { + x(repeat, count) = x_case; + y(repeat, count) = y_case; + } + ++count; + } + } + } + } + + Array<Scalar, Dynamic, Dynamic> actual = x.pow(y); + const Scalar tol = test_precision<Scalar>(); + bool all_pass = true; + for (int i = 0; i < 1; ++i) { + for (int j = 0; j < num_cases; ++j) { + Scalar e = static_cast<Scalar>(std::pow(x(i,j), y(i,j))); + Scalar a = actual(i, j); + bool fail = !(a==e) && !internal::isApprox(a, e, tol) && !((numext::isnan)(a) && (numext::isnan)(e)); + all_pass &= !fail; + if (fail) { + std::cout << "pow(" << x(i,j) << "," << y(i,j) << ") = " << a << " != " << e << std::endl; + } + } + } + VERIFY(all_pass); +} + template<typename ArrayType> void array(const ArrayType& m) { - typedef typename ArrayType::Index Index; typedef typename ArrayType::Scalar Scalar; typedef typename ArrayType::RealScalar RealScalar; typedef Array<Scalar, ArrayType::RowsAtCompileTime, 1> ColVectorType; typedef Array<Scalar, 1, ArrayType::ColsAtCompileTime> RowVectorType; Index rows = m.rows(); - Index cols = m.cols(); + Index cols = m.cols(); ArrayType m1 = ArrayType::Random(rows, cols), m2 = ArrayType::Random(rows, cols), @@ -44,25 +116,25 @@ template<typename ArrayType> void array(const ArrayType& m) VERIFY_IS_APPROX(m3, m1 + s2); m3 = m1; m3 -= s1; - VERIFY_IS_APPROX(m3, m1 - s1); - + VERIFY_IS_APPROX(m3, m1 - s1); + // scalar operators via Maps m3 = m1; ArrayType::Map(m1.data(), m1.rows(), m1.cols()) -= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 - m2); - + m3 = m1; ArrayType::Map(m1.data(), m1.rows(), m1.cols()) += ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 + m2); - + m3 = m1; ArrayType::Map(m1.data(), m1.rows(), m1.cols()) *= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 * m2); - + m3 = m1; m2 = ArrayType::Random(rows,cols); m2 = (m2==0).select(1,m2); - ArrayType::Map(m1.data(), m1.rows(), m1.cols()) /= 
ArrayType::Map(m2.data(), m2.rows(), m2.cols()); + ArrayType::Map(m1.data(), m1.rows(), m1.cols()) /= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 / m2); // reductions @@ -84,7 +156,7 @@ template<typename ArrayType> void array(const ArrayType& m) VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1); m3 = m1; VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1); - + // Conversion from scalar VERIFY_IS_APPROX((m3 = s1), ArrayType::Constant(rows,cols,s1)); VERIFY_IS_APPROX((m3 = 1), ArrayType::Constant(rows,cols,1)); @@ -93,16 +165,31 @@ template<typename ArrayType> void array(const ArrayType& m) ArrayType::RowsAtCompileTime==Dynamic?2:ArrayType::RowsAtCompileTime, ArrayType::ColsAtCompileTime==Dynamic?2:ArrayType::ColsAtCompileTime, ArrayType::Options> FixedArrayType; - FixedArrayType f1(s1); - VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); - FixedArrayType f2(numext::real(s1)); - VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); - FixedArrayType f3((int)100*numext::real(s1)); - VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); - f1.setRandom(); - FixedArrayType f4(f1.data()); - VERIFY_IS_APPROX(f4, f1); - + { + FixedArrayType f1(s1); + VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); + FixedArrayType f2(numext::real(s1)); + VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); + FixedArrayType f3((int)100*numext::real(s1)); + VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); + f1.setRandom(); + FixedArrayType f4(f1.data()); + VERIFY_IS_APPROX(f4, f1); + } + #if EIGEN_HAS_CXX11 + { + FixedArrayType f1{s1}; + VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); + FixedArrayType f2{numext::real(s1)}; + VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); + FixedArrayType f3{(int)100*numext::real(s1)}; + VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); + f1.setRandom(); + FixedArrayType f4{f1.data()}; + 
VERIFY_IS_APPROX(f4, f1); + } + #endif + // pow VERIFY_IS_APPROX(m1.pow(2), m1.square()); VERIFY_IS_APPROX(pow(m1,2), m1.square()); @@ -121,16 +208,56 @@ template<typename ArrayType> void array(const ArrayType& m) // Check possible conflicts with 1D ctor typedef Array<Scalar, Dynamic, 1> OneDArrayType; - OneDArrayType o1(rows); - VERIFY(o1.size()==rows); - OneDArrayType o4((int)rows); - VERIFY(o4.size()==rows); + { + OneDArrayType o1(rows); + VERIFY(o1.size()==rows); + OneDArrayType o2(static_cast<int>(rows)); + VERIFY(o2.size()==rows); + } + #if EIGEN_HAS_CXX11 + { + OneDArrayType o1{rows}; + VERIFY(o1.size()==rows); + OneDArrayType o4{int(rows)}; + VERIFY(o4.size()==rows); + } + #endif + // Check possible conflicts with 2D ctor + typedef Array<Scalar, Dynamic, Dynamic> TwoDArrayType; + typedef Array<Scalar, 2, 1> ArrayType2; + { + TwoDArrayType o1(rows,cols); + VERIFY(o1.rows()==rows); + VERIFY(o1.cols()==cols); + TwoDArrayType o2(static_cast<int>(rows),static_cast<int>(cols)); + VERIFY(o2.rows()==rows); + VERIFY(o2.cols()==cols); + + ArrayType2 o3(rows,cols); + VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols)); + ArrayType2 o4(static_cast<int>(rows),static_cast<int>(cols)); + VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols)); + } + #if EIGEN_HAS_CXX11 + { + TwoDArrayType o1{rows,cols}; + VERIFY(o1.rows()==rows); + VERIFY(o1.cols()==cols); + TwoDArrayType o2{int(rows),int(cols)}; + VERIFY(o2.rows()==rows); + VERIFY(o2.cols()==cols); + + ArrayType2 o3{rows,cols}; + VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols)); + ArrayType2 o4{int(rows),int(cols)}; + VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols)); + } + #endif } template<typename ArrayType> void comparisons(const ArrayType& m) { using std::abs; - typedef typename ArrayType::Index Index; typedef typename ArrayType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; @@ -144,7 +271,7 @@ template<typename ArrayType> void comparisons(const ArrayType& m) m2 = ArrayType::Random(rows, cols), 
m3(rows, cols), m4 = m1; - + m4 = (m4.abs()==Scalar(0)).select(1,m4); VERIFY(((m1 + Scalar(1)) > m1).all()); @@ -197,7 +324,7 @@ template<typename ArrayType> void comparisons(const ArrayType& m) RealScalar a = m1.abs().mean(); VERIFY( (m1<-a || m1>a).count() == (m1.abs()>a).count()); - typedef Array<typename ArrayType::Index, Dynamic, 1> ArrayOfIndices; + typedef Array<Index, Dynamic, 1> ArrayOfIndices; // TODO allows colwise/rowwise for array VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose()); @@ -208,7 +335,6 @@ template<typename ArrayType> void array_real(const ArrayType& m) { using std::abs; using std::sqrt; - typedef typename ArrayType::Index Index; typedef typename ArrayType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; @@ -220,7 +346,7 @@ template<typename ArrayType> void array_real(const ArrayType& m) m3(rows, cols), m4 = m1; - m4 = (m4.abs()==Scalar(0)).select(1,m4); + m4 = (m4.abs()==Scalar(0)).select(Scalar(1),m4); Scalar s1 = internal::random<Scalar>(); @@ -234,31 +360,39 @@ template<typename ArrayType> void array_real(const ArrayType& m) VERIFY_IS_APPROX(m1.sinh(), sinh(m1)); VERIFY_IS_APPROX(m1.cosh(), cosh(m1)); VERIFY_IS_APPROX(m1.tanh(), tanh(m1)); +#if EIGEN_HAS_CXX11_MATH + VERIFY_IS_APPROX(m1.tanh().atanh(), atanh(tanh(m1))); + VERIFY_IS_APPROX(m1.sinh().asinh(), asinh(sinh(m1))); + VERIFY_IS_APPROX(m1.cosh().acosh(), acosh(cosh(m1))); +#endif + VERIFY_IS_APPROX(m1.logistic(), logistic(m1)); VERIFY_IS_APPROX(m1.arg(), arg(m1)); VERIFY_IS_APPROX(m1.round(), round(m1)); + VERIFY_IS_APPROX(m1.rint(), rint(m1)); VERIFY_IS_APPROX(m1.floor(), floor(m1)); VERIFY_IS_APPROX(m1.ceil(), ceil(m1)); VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all()); VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all()); VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all()); - VERIFY_IS_APPROX(m1.inverse(), inverse(m1)); + VERIFY_IS_APPROX(m4.inverse(), inverse(m4)); VERIFY_IS_APPROX(m1.abs(), 
abs(m1)); VERIFY_IS_APPROX(m1.abs2(), abs2(m1)); VERIFY_IS_APPROX(m1.square(), square(m1)); VERIFY_IS_APPROX(m1.cube(), cube(m1)); VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); VERIFY_IS_APPROX(m1.sign(), sign(m1)); + VERIFY((m1.sqrt().sign().isNaN() == (Eigen::isnan)(sign(sqrt(m1)))).all()); - - // avoid NaNs with abs() so verification doesn't fail - m3 = m1.abs(); - VERIFY_IS_APPROX(m3.sqrt(), sqrt(abs(m1))); - VERIFY_IS_APPROX(m3.rsqrt(), Scalar(1)/sqrt(abs(m1))); - VERIFY_IS_APPROX(rsqrt(m3), Scalar(1)/sqrt(abs(m1))); + // avoid inf and NaNs so verification doesn't fail + m3 = m4.abs(); + VERIFY_IS_APPROX(m3.sqrt(), sqrt(abs(m3))); + VERIFY_IS_APPROX(m3.rsqrt(), Scalar(1)/sqrt(abs(m3))); + VERIFY_IS_APPROX(rsqrt(m3), Scalar(1)/sqrt(abs(m3))); VERIFY_IS_APPROX(m3.log(), log(m3)); VERIFY_IS_APPROX(m3.log1p(), log1p(m3)); VERIFY_IS_APPROX(m3.log10(), log10(m3)); + VERIFY_IS_APPROX(m3.log2(), log2(m3)); VERIFY((!(m1>m2) == (m1<=m2)).all()); @@ -266,42 +400,58 @@ template<typename ArrayType> void array_real(const ArrayType& m) VERIFY_IS_APPROX(sin(m1.asin()), m1); VERIFY_IS_APPROX(cos(m1.acos()), m1); VERIFY_IS_APPROX(tan(m1.atan()), m1); - VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1))); - VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1))); - VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1)))); - VERIFY_IS_APPROX(arg(m1), ((m1<0).template cast<Scalar>())*std::acos(-1.0)); + VERIFY_IS_APPROX(sinh(m1), Scalar(0.5)*(exp(m1)-exp(-m1))); + VERIFY_IS_APPROX(cosh(m1), Scalar(0.5)*(exp(m1)+exp(-m1))); + VERIFY_IS_APPROX(tanh(m1), (Scalar(0.5)*(exp(m1)-exp(-m1)))/(Scalar(0.5)*(exp(m1)+exp(-m1)))); + VERIFY_IS_APPROX(logistic(m1), (Scalar(1)/(Scalar(1)+exp(-m1)))); + VERIFY_IS_APPROX(arg(m1), ((m1<Scalar(0)).template cast<Scalar>())*Scalar(std::acos(Scalar(-1)))); VERIFY((round(m1) <= ceil(m1) && round(m1) >= floor(m1)).all()); - VERIFY((Eigen::isnan)((m1*0.0)/0.0).all()); - VERIFY((Eigen::isinf)(m4/0.0).all()); - 
VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*0.0/0.0)) && (!(Eigen::isfinite)(m4/0.0))).all()); - VERIFY_IS_APPROX(inverse(inverse(m1)),m1); + VERIFY((rint(m1) <= ceil(m1) && rint(m1) >= floor(m1)).all()); + VERIFY(((ceil(m1) - round(m1)) <= Scalar(0.5) || (round(m1) - floor(m1)) <= Scalar(0.5)).all()); + VERIFY(((ceil(m1) - round(m1)) <= Scalar(1.0) && (round(m1) - floor(m1)) <= Scalar(1.0)).all()); + VERIFY(((ceil(m1) - rint(m1)) <= Scalar(0.5) || (rint(m1) - floor(m1)) <= Scalar(0.5)).all()); + VERIFY(((ceil(m1) - rint(m1)) <= Scalar(1.0) && (rint(m1) - floor(m1)) <= Scalar(1.0)).all()); + VERIFY((Eigen::isnan)((m1*Scalar(0))/Scalar(0)).all()); + VERIFY((Eigen::isinf)(m4/Scalar(0)).all()); + VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*Scalar(0)/Scalar(0))) && (!(Eigen::isfinite)(m4/Scalar(0)))).all()); + VERIFY_IS_APPROX(inverse(inverse(m4)),m4); VERIFY((abs(m1) == m1 || abs(m1) == -m1).all()); - VERIFY_IS_APPROX(m3, sqrt(abs2(m1))); + VERIFY_IS_APPROX(m3, sqrt(abs2(m3))); + VERIFY_IS_APPROX(m1.absolute_difference(m2), (m1 > m2).select(m1 - m2, m2 - m1)); VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() ); VERIFY_IS_APPROX( m1*m1.sign(),m1.abs()); VERIFY_IS_APPROX(m1.sign() * m1.abs(), m1); VERIFY_IS_APPROX(numext::abs2(numext::real(m1)) + numext::abs2(numext::imag(m1)), numext::abs2(m1)); - VERIFY_IS_APPROX(numext::abs2(real(m1)) + numext::abs2(imag(m1)), numext::abs2(m1)); + VERIFY_IS_APPROX(numext::abs2(Eigen::real(m1)) + numext::abs2(Eigen::imag(m1)), numext::abs2(m1)); if(!NumTraits<Scalar>::IsComplex) VERIFY_IS_APPROX(numext::real(m1), m1); // shift argument of logarithm so that it is not zero Scalar smallNumber = NumTraits<Scalar>::dummy_precision(); - VERIFY_IS_APPROX((m3 + smallNumber).log() , log(abs(m1) + smallNumber)); - VERIFY_IS_APPROX((m3 + smallNumber + 1).log() , log1p(abs(m1) + smallNumber)); + VERIFY_IS_APPROX((m3 + smallNumber).log() , log(abs(m3) + smallNumber)); + VERIFY_IS_APPROX((m3 + smallNumber + Scalar(1)).log() , 
log1p(abs(m3) + smallNumber)); VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); VERIFY_IS_APPROX(m1.exp(), exp(m1)); VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); + VERIFY_IS_APPROX(m1.expm1(), expm1(m1)); + VERIFY_IS_APPROX((m3 + smallNumber).exp() - Scalar(1), expm1(abs(m3) + smallNumber)); + VERIFY_IS_APPROX(m3.pow(RealScalar(0.5)), m3.sqrt()); VERIFY_IS_APPROX(pow(m3,RealScalar(0.5)), m3.sqrt()); VERIFY_IS_APPROX(m3.pow(RealScalar(-0.5)), m3.rsqrt()); VERIFY_IS_APPROX(pow(m3,RealScalar(-0.5)), m3.rsqrt()); - VERIFY_IS_APPROX(log10(m3), log(m3)/log(10)); + // Avoid inf and NaN. + m3 = (m1.square()<NumTraits<Scalar>::epsilon()).select(Scalar(1),m3); + VERIFY_IS_APPROX(m3.pow(RealScalar(-2)), m3.square().inverse()); + pow_test<Scalar>(); + + VERIFY_IS_APPROX(log10(m3), log(m3)/numext::log(Scalar(10))); + VERIFY_IS_APPROX(log2(m3), log(m3)/numext::log(Scalar(2))); // scalar by array division const RealScalar tiny = sqrt(std::numeric_limits<RealScalar>::epsilon()); @@ -319,7 +469,6 @@ template<typename ArrayType> void array_real(const ArrayType& m) template<typename ArrayType> void array_complex(const ArrayType& m) { - typedef typename ArrayType::Index Index; typedef typename ArrayType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; @@ -329,7 +478,7 @@ template<typename ArrayType> void array_complex(const ArrayType& m) ArrayType m1 = ArrayType::Random(rows, cols), m2(rows, cols), m4 = m1; - + m4.real() = (m4.real().abs()==RealScalar(0)).select(RealScalar(1),m4.real()); m4.imag() = (m4.imag().abs()==RealScalar(0)).select(RealScalar(1),m4.imag()); @@ -346,13 +495,15 @@ template<typename ArrayType> void array_complex(const ArrayType& m) VERIFY_IS_APPROX(m1.sinh(), sinh(m1)); VERIFY_IS_APPROX(m1.cosh(), cosh(m1)); VERIFY_IS_APPROX(m1.tanh(), tanh(m1)); + VERIFY_IS_APPROX(m1.logistic(), logistic(m1)); VERIFY_IS_APPROX(m1.arg(), arg(m1)); VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all()); VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all()); 
VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all()); - VERIFY_IS_APPROX(m1.inverse(), inverse(m1)); + VERIFY_IS_APPROX(m4.inverse(), inverse(m4)); VERIFY_IS_APPROX(m1.log(), log(m1)); VERIFY_IS_APPROX(m1.log10(), log10(m1)); + VERIFY_IS_APPROX(m1.log2(), log2(m1)); VERIFY_IS_APPROX(m1.abs(), abs(m1)); VERIFY_IS_APPROX(m1.abs2(), abs2(m1)); VERIFY_IS_APPROX(m1.sqrt(), sqrt(m1)); @@ -366,13 +517,19 @@ template<typename ArrayType> void array_complex(const ArrayType& m) VERIFY_IS_APPROX(m1.exp(), exp(m1)); VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); + VERIFY_IS_APPROX(m1.expm1(), expm1(m1)); + VERIFY_IS_APPROX(expm1(m1), exp(m1) - 1.); + // Check for larger magnitude complex numbers that expm1 matches exp - 1. + VERIFY_IS_APPROX(expm1(10. * m1), exp(10. * m1) - 1.); + VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1))); VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1))); VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1)))); + VERIFY_IS_APPROX(logistic(m1), (1.0/(1.0 + exp(-m1)))); for (Index i = 0; i < m.rows(); ++i) for (Index j = 0; j < m.cols(); ++j) - m3(i,j) = std::atan2(imag(m1(i,j)), real(m1(i,j))); + m3(i,j) = std::atan2(m1(i,j).imag(), m1(i,j).real()); VERIFY_IS_APPROX(arg(m1), m3); std::complex<RealScalar> zero(0.0,0.0); @@ -397,11 +554,12 @@ template<typename ArrayType> void array_complex(const ArrayType& m) VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*zero/zero)) && (!(Eigen::isfinite)(m1/zero))).all()); - VERIFY_IS_APPROX(inverse(inverse(m1)),m1); + VERIFY_IS_APPROX(inverse(inverse(m4)),m4); VERIFY_IS_APPROX(conj(m1.conjugate()), m1); - VERIFY_IS_APPROX(abs(m1), sqrt(square(real(m1))+square(imag(m1)))); + VERIFY_IS_APPROX(abs(m1), sqrt(square(m1.real())+square(m1.imag()))); VERIFY_IS_APPROX(abs(m1), sqrt(abs2(m1))); VERIFY_IS_APPROX(log10(m1), log(m1)/log(10)); + VERIFY_IS_APPROX(log2(m1), log(m1)/log(2)); VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() ); VERIFY_IS_APPROX( m1.sign() * m1.abs(), m1); @@ -419,12 
+577,15 @@ template<typename ArrayType> void array_complex(const ArrayType& m) VERIFY_IS_APPROX(m2, m1.transpose()); m2.transposeInPlace(); VERIFY_IS_APPROX(m2, m1); - + // Check vectorized inplace transpose. + ArrayType m5 = ArrayType::Random(131, 131); + ArrayType m6 = m5; + m6.transposeInPlace(); + VERIFY_IS_APPROX(m6, m5.transpose()); } template<typename ArrayType> void min_max(const ArrayType& m) { - typedef typename ArrayType::Index Index; typedef typename ArrayType::Scalar Scalar; Index rows = m.rows(); @@ -449,9 +610,58 @@ template<typename ArrayType> void min_max(const ArrayType& m) VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)( maxM1)); VERIFY_IS_APPROX(m1, (m1.max)( minM1)); + + // min/max with various NaN propagation options. + if (m1.size() > 1 && !NumTraits<Scalar>::IsInteger) { + m1(0,0) = NumTraits<Scalar>::quiet_NaN(); + maxM1 = m1.template maxCoeff<PropagateNaN>(); + minM1 = m1.template minCoeff<PropagateNaN>(); + VERIFY((numext::isnan)(maxM1)); + VERIFY((numext::isnan)(minM1)); + + maxM1 = m1.template maxCoeff<PropagateNumbers>(); + minM1 = m1.template minCoeff<PropagateNumbers>(); + VERIFY(!(numext::isnan)(maxM1)); + VERIFY(!(numext::isnan)(minM1)); + } +} + +template<int N> +struct shift_left { + template<typename Scalar> + Scalar operator()(const Scalar& v) const { + return v << N; + } +}; + +template<int N> +struct arithmetic_shift_right { + template<typename Scalar> + Scalar operator()(const Scalar& v) const { + return v >> N; + } +}; + +template<typename ArrayType> void array_integer(const ArrayType& m) +{ + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2(rows, cols); + + m2 = m1.template shiftLeft<2>(); + VERIFY( (m2 == m1.unaryExpr(shift_left<2>())).all() ); + m2 = m1.template shiftLeft<9>(); + VERIFY( (m2 == m1.unaryExpr(shift_left<9>())).all() ); + + m2 = m1.template shiftRight<2>(); + VERIFY( (m2 == m1.unaryExpr(arithmetic_shift_right<2>())).all() ); + m2 = 
m1.template shiftRight<9>(); + VERIFY( (m2 == m1.unaryExpr(arithmetic_shift_right<9>())).all() ); } -void test_array() +EIGEN_DECLARE_TEST(array_cwise) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array(Array<float, 1, 1>()) ); @@ -460,6 +670,9 @@ void test_array() CALL_SUBTEST_4( array(ArrayXXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( array(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( array(ArrayXXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array(Array<Index,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array_integer(ArrayXXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array_integer(Array<Index,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( comparisons(Array<float, 1, 1>()) ); @@ -480,6 +693,8 @@ void test_array() CALL_SUBTEST_2( array_real(Array22f()) ); CALL_SUBTEST_3( array_real(Array44d()) ); CALL_SUBTEST_5( array_real(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( array_real(Array<Eigen::half, 32, 32>()) ); + CALL_SUBTEST_8( array_real(Array<Eigen::bfloat16, 32, 32>()) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_4( array_complex(ArrayXXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp index b8721391f..fb6be351e 100644 --- a/test/array_for_matrix.cpp +++ b/test/array_for_matrix.cpp @@ -11,7 +11,6 @@ template<typename MatrixType> void array_for_matrix(const MatrixType& 
m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType; typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType; @@ -58,7 +57,14 @@ template<typename MatrixType> void array_for_matrix(const MatrixType& m) VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1); // empty objects - VERIFY_IS_APPROX(m1.block(0,0,0,cols).colwise().sum(), RowVectorType::Zero(cols)); + VERIFY_IS_APPROX((m1.template block<0,Dynamic>(0,0,0,cols).colwise().sum()), RowVectorType::Zero(cols)); + VERIFY_IS_APPROX((m1.template block<Dynamic,0>(0,0,rows,0).rowwise().sum()), ColVectorType::Zero(rows)); + VERIFY_IS_APPROX((m1.template block<0,Dynamic>(0,0,0,cols).colwise().prod()), RowVectorType::Ones(cols)); + VERIFY_IS_APPROX((m1.template block<Dynamic,0>(0,0,rows,0).rowwise().prod()), ColVectorType::Ones(rows)); + + VERIFY_IS_APPROX(m1.block(0,0,0,cols).colwise().sum(), RowVectorType::Zero(cols)); + VERIFY_IS_APPROX(m1.block(0,0,rows,0).rowwise().sum(), ColVectorType::Zero(rows)); + VERIFY_IS_APPROX(m1.block(0,0,0,cols).colwise().prod(), RowVectorType::Ones(cols)); VERIFY_IS_APPROX(m1.block(0,0,rows,0).rowwise().prod(), ColVectorType::Ones(rows)); // verify the const accessors exist @@ -83,7 +89,6 @@ template<typename MatrixType> void array_for_matrix(const MatrixType& m) template<typename MatrixType> void comparisons(const MatrixType& m) { using std::abs; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; @@ -140,7 +145,7 @@ template<typename MatrixType> void comparisons(const MatrixType& m) RealScalar a = m1.cwiseAbs().mean(); VERIFY( ((m1.array()<-a).matrix() || (m1.array()>a).matrix()).count() == (m1.cwiseAbs().array()>a).count()); - typedef Matrix<typename MatrixType::Index, Dynamic, 1> VectorOfIndices; + typedef Matrix<Index, Dynamic, 1> VectorOfIndices; // TODO allows 
colwise/rowwise for array VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().colwise().count(), VectorOfIndices::Constant(cols,rows).transpose()); @@ -172,7 +177,6 @@ template<typename VectorType> void lpNorm(const VectorType& v) template<typename MatrixType> void cwise_min_max(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); @@ -211,7 +215,6 @@ template<typename MatrixType> void cwise_min_max(const MatrixType& m) template<typename MatrixTraits> void resize(const MatrixTraits& t) { - typedef typename MatrixTraits::Index Index; typedef typename MatrixTraits::Scalar Scalar; typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType; typedef Array<Scalar,Dynamic,Dynamic> Array2DType; @@ -260,7 +263,7 @@ void regrrssion_bug_1410() VERIFY((internal::traits<MatrixWrapper<Array4i> >::Flags&LvalueBit)==LvalueBit); } -void test_array_for_matrix() +EIGEN_DECLARE_TEST(array_for_matrix) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array_for_matrix(Matrix<float, 1, 1>()) ); diff --git a/test/array_of_string.cpp b/test/array_of_string.cpp index e23b7c59e..23e51529b 100644 --- a/test/array_of_string.cpp +++ b/test/array_of_string.cpp @@ -9,7 +9,7 @@ #include "main.h" -void test_array_of_string() +EIGEN_DECLARE_TEST(array_of_string) { typedef Array<std::string,1,Dynamic> ArrayXs; ArrayXs a1(3), a2(3), a3(3), a3ref(3); diff --git a/test/array_replicate.cpp b/test/array_replicate.cpp index 779c8fc2f..057c3c77b 100644 --- a/test/array_replicate.cpp +++ b/test/array_replicate.cpp @@ -14,7 +14,6 @@ template<typename MatrixType> void replicate(const MatrixType& m) /* this test covers the following files: Replicate.cpp */ - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX; @@ -69,7 +68,7 @@ template<typename MatrixType> void 
replicate(const MatrixType& m) VERIFY_IS_APPROX(vx1, v1.colwise().replicate(f2)); } -void test_array_replicate() +EIGEN_DECLARE_TEST(array_replicate) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( replicate(Matrix<float, 1, 1>()) ); diff --git a/test/array_reverse.cpp b/test/array_reverse.cpp index c9d9f90c3..c77528a5b 100644 --- a/test/array_reverse.cpp +++ b/test/array_reverse.cpp @@ -15,7 +15,6 @@ using namespace std; template<typename MatrixType> void reverse(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; @@ -124,7 +123,70 @@ template<typename MatrixType> void reverse(const MatrixType& m) VERIFY_IS_APPROX(x, m1(r, cols - 1 - c)); } -void test_array_reverse() +template<int> +void array_reverse_extra() +{ + Vector4f x; x << 1, 2, 3, 4; + Vector4f y; y << 4, 3, 2, 1; + VERIFY(x.reverse()[1] == 3); + VERIFY(x.reverse() == y); +} + +// Simpler version of reverseInPlace leveraging a bug +// in clang 6/7 with -O2 and AVX or AVX512 enabled. +// This simpler version ensure that the clang bug is not simply hidden +// through mis-inlining of reverseInPlace or other minor changes. 
+template<typename MatrixType> +EIGEN_DONT_INLINE +void bug1684_job1(MatrixType& m1, MatrixType& m2) +{ + m2 = m1; + m2.col(0).swap(m2.col(3)); + m2.col(1).swap(m2.col(2)); +} + +template<typename MatrixType> +EIGEN_DONT_INLINE +void bug1684_job2(MatrixType& m1, MatrixType& m2) +{ + m2 = m1; // load m1/m2 in AVX registers + m1.col(0) = m2.col(3); // perform 128 bits moves + m1.col(1) = m2.col(2); + m1.col(2) = m2.col(1); + m1.col(3) = m2.col(0); +} + +template<typename MatrixType> +EIGEN_DONT_INLINE +void bug1684_job3(MatrixType& m1, MatrixType& m2) +{ + m2 = m1; + Vector4f tmp; + tmp = m2.col(0); + m2.col(0) = m2.col(3); + m2.col(3) = tmp; + tmp = m2.col(1); + m2.col(1) = m2.col(2); + m2.col(2) = tmp; + +} + +template<int> +void bug1684() +{ + Matrix4f m1 = Matrix4f::Random(); + Matrix4f m2 = Matrix4f::Random(); + bug1684_job1(m1,m2); + VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); + bug1684_job2(m1,m2); + VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); + // This one still fail after our swap's workaround, + // but I expect users not to implement their own swap. 
+ // bug1684_job3(m1,m2); + // VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); +} + +EIGEN_DECLARE_TEST(array_reverse) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( reverse(Matrix<float, 1, 1>()) ); @@ -136,11 +198,7 @@ void test_array_reverse() CALL_SUBTEST_7( reverse(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( reverse(Matrix<float, 100, 100>()) ); CALL_SUBTEST_9( reverse(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_3( bug1684<0>() ); } -#ifdef EIGEN_TEST_PART_3 - Vector4f x; x << 1, 2, 3, 4; - Vector4f y; y << 4, 3, 2, 1; - VERIFY(x.reverse()[1] == 3); - VERIFY(x.reverse() == y); -#endif + CALL_SUBTEST_3( array_reverse_extra<0>() ); } diff --git a/test/bandmatrix.cpp b/test/bandmatrix.cpp index f8c38f7c3..66a1b0db4 100644 --- a/test/bandmatrix.cpp +++ b/test/bandmatrix.cpp @@ -59,7 +59,7 @@ template<typename MatrixType> void bandmatrix(const MatrixType& _m) using Eigen::internal::BandMatrix; -void test_bandmatrix() +EIGEN_DECLARE_TEST(bandmatrix) { for(int i = 0; i < 10*g_repeat ; i++) { Index rows = internal::random<Index>(1,10); diff --git a/test/basicstuff.cpp b/test/basicstuff.cpp index 99d91f9da..4ca607c82 100644 --- a/test/basicstuff.cpp +++ b/test/basicstuff.cpp @@ -10,10 +10,10 @@ #define EIGEN_NO_STATIC_ASSERT #include "main.h" +#include "random_without_cast_overflow.h" template<typename MatrixType> void basicStuff(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType; @@ -49,6 +49,22 @@ template<typename MatrixType> void basicStuff(const MatrixType& m) v1[r] = x; VERIFY_IS_APPROX(x, v1[r]); + // test fetching with various index types. 
+ Index r1 = internal::random<Index>(0, numext::mini(Index(127),rows-1)); + x = v1(static_cast<char>(r1)); + x = v1(static_cast<signed char>(r1)); + x = v1(static_cast<unsigned char>(r1)); + x = v1(static_cast<signed short>(r1)); + x = v1(static_cast<unsigned short>(r1)); + x = v1(static_cast<signed int>(r1)); + x = v1(static_cast<unsigned int>(r1)); + x = v1(static_cast<signed long>(r1)); + x = v1(static_cast<unsigned long>(r1)); +#if EIGEN_HAS_CXX11 + x = v1(static_cast<long long int>(r1)); + x = v1(static_cast<unsigned long long int>(r1)); +#endif + VERIFY_IS_APPROX( v1, v1); VERIFY_IS_NOT_APPROX( v1, 2*v1); VERIFY_IS_MUCH_SMALLER_THAN( vzero, v1); @@ -75,7 +91,7 @@ template<typename MatrixType> void basicStuff(const MatrixType& m) Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> cv(rows); rv = square.row(r); cv = square.col(r); - + VERIFY_IS_APPROX(rv, cv.transpose()); if(cols!=1 && rows!=1 && MatrixType::SizeAtCompileTime!=Dynamic) @@ -105,28 +121,28 @@ template<typename MatrixType> void basicStuff(const MatrixType& m) m1 = m2; VERIFY(m1==m2); VERIFY(!(m1!=m2)); - + // check automatic transposition sm2.setZero(); - for(typename MatrixType::Index i=0;i<rows;++i) + for(Index i=0;i<rows;++i) sm2.col(i) = sm1.row(i); VERIFY_IS_APPROX(sm2,sm1.transpose()); - + sm2.setZero(); - for(typename MatrixType::Index i=0;i<rows;++i) + for(Index i=0;i<rows;++i) sm2.col(i).noalias() = sm1.row(i); VERIFY_IS_APPROX(sm2,sm1.transpose()); - + sm2.setZero(); - for(typename MatrixType::Index i=0;i<rows;++i) + for(Index i=0;i<rows;++i) sm2.col(i).noalias() += sm1.row(i); VERIFY_IS_APPROX(sm2,sm1.transpose()); - + sm2.setZero(); - for(typename MatrixType::Index i=0;i<rows;++i) + for(Index i=0;i<rows;++i) sm2.col(i).noalias() -= sm1.row(i); VERIFY_IS_APPROX(sm2,-sm1.transpose()); - + // check ternary usage { bool b = internal::random<int>(0,10)>5; @@ -144,7 +160,6 @@ template<typename MatrixType> void basicStuff(const MatrixType& m) template<typename MatrixType> void 
basicStuffComplex(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> RealMatrixType; @@ -180,16 +195,78 @@ template<typename MatrixType> void basicStuffComplex(const MatrixType& m) VERIFY(!static_cast<const MatrixType&>(cm).imag().isZero()); } -#ifdef EIGEN_TEST_PART_2 -void casting() +template<typename SrcScalar, typename TgtScalar> +struct casting_test { + static void run() { + Matrix<SrcScalar,4,4> m; + for (int i=0; i<m.rows(); ++i) { + for (int j=0; j<m.cols(); ++j) { + m(i, j) = internal::random_without_cast_overflow<SrcScalar,TgtScalar>::value(); + } + } + Matrix<TgtScalar,4,4> n = m.template cast<TgtScalar>(); + for (int i=0; i<m.rows(); ++i) { + for (int j=0; j<m.cols(); ++j) { + VERIFY_IS_APPROX(n(i, j), (internal::cast<SrcScalar,TgtScalar>(m(i, j)))); + } + } + } +}; + +template<typename SrcScalar, typename EnableIf = void> +struct casting_test_runner { + static void run() { + casting_test<SrcScalar, bool>::run(); + casting_test<SrcScalar, int8_t>::run(); + casting_test<SrcScalar, uint8_t>::run(); + casting_test<SrcScalar, int16_t>::run(); + casting_test<SrcScalar, uint16_t>::run(); + casting_test<SrcScalar, int32_t>::run(); + casting_test<SrcScalar, uint32_t>::run(); +#if EIGEN_HAS_CXX11 + casting_test<SrcScalar, int64_t>::run(); + casting_test<SrcScalar, uint64_t>::run(); +#endif + casting_test<SrcScalar, half>::run(); + casting_test<SrcScalar, bfloat16>::run(); + casting_test<SrcScalar, float>::run(); + casting_test<SrcScalar, double>::run(); + casting_test<SrcScalar, std::complex<float> >::run(); + casting_test<SrcScalar, std::complex<double> >::run(); + } +}; + +template<typename SrcScalar> +struct casting_test_runner<SrcScalar, typename internal::enable_if<(NumTraits<SrcScalar>::IsComplex)>::type> { - Matrix4f m = Matrix4f::Random(), m2; - Matrix4d n 
= m.cast<double>(); - VERIFY(m.isApprox(n.cast<float>())); - m2 = m.cast<float>(); // check the specialization when NewType == Type - VERIFY(m.isApprox(m2)); -} + static void run() { + // Only a few casts from std::complex<T> are defined. + casting_test<SrcScalar, half>::run(); + casting_test<SrcScalar, bfloat16>::run(); + casting_test<SrcScalar, std::complex<float> >::run(); + casting_test<SrcScalar, std::complex<double> >::run(); + } +}; + +void casting_all() { + casting_test_runner<bool>::run(); + casting_test_runner<int8_t>::run(); + casting_test_runner<uint8_t>::run(); + casting_test_runner<int16_t>::run(); + casting_test_runner<uint16_t>::run(); + casting_test_runner<int32_t>::run(); + casting_test_runner<uint32_t>::run(); +#if EIGEN_HAS_CXX11 + casting_test_runner<int64_t>::run(); + casting_test_runner<uint64_t>::run(); #endif + casting_test_runner<half>::run(); + casting_test_runner<bfloat16>::run(); + casting_test_runner<float>::run(); + casting_test_runner<double>::run(); + casting_test_runner<std::complex<float> >::run(); + casting_test_runner<std::complex<double> >::run(); +} template <typename Scalar> void fixedSizeMatrixConstruction() @@ -197,12 +274,12 @@ void fixedSizeMatrixConstruction() Scalar raw[4]; for(int k=0; k<4; ++k) raw[k] = internal::random<Scalar>(); - + { Matrix<Scalar,4,1> m(raw); Array<Scalar,4,1> a(raw); for(int k=0; k<4; ++k) VERIFY(m(k) == raw[k]); - for(int k=0; k<4; ++k) VERIFY(a(k) == raw[k]); + for(int k=0; k<4; ++k) VERIFY(a(k) == raw[k]); VERIFY_IS_EQUAL(m,(Matrix<Scalar,4,1>(raw[0],raw[1],raw[2],raw[3]))); VERIFY((a==(Array<Scalar,4,1>(raw[0],raw[1],raw[2],raw[3]))).all()); } @@ -254,7 +331,7 @@ void fixedSizeMatrixConstruction() } } -void test_basicstuff() +EIGEN_DECLARE_TEST(basicstuff) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( basicStuff(Matrix<float, 1, 1>()) ); @@ -264,6 +341,7 @@ void test_basicstuff() CALL_SUBTEST_5( basicStuff(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), 
internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( basicStuff(Matrix<float, 100, 100>()) ); CALL_SUBTEST_7( basicStuff(Matrix<long double,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_8( casting_all() ); CALL_SUBTEST_3( basicStuffComplex(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( basicStuffComplex(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); @@ -275,6 +353,4 @@ void test_basicstuff() CALL_SUBTEST_1(fixedSizeMatrixConstruction<int>()); CALL_SUBTEST_1(fixedSizeMatrixConstruction<long int>()); CALL_SUBTEST_1(fixedSizeMatrixConstruction<std::ptrdiff_t>()); - - CALL_SUBTEST_2(casting()); } diff --git a/test/bdcsvd.cpp b/test/bdcsvd.cpp index f9f687aac..e92a7dc97 100644 --- a/test/bdcsvd.cpp +++ b/test/bdcsvd.cpp @@ -28,9 +28,13 @@ template<typename MatrixType> void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true) { - MatrixType m = a; - if(pickrandom) + MatrixType m; + if(pickrandom) { + m.resizeLike(a); svd_fill_random(m); + } + else + m = a; CALL_SUBTEST(( svd_test_all_computation_options<BDCSVD<MatrixType> >(m, false) )); } @@ -46,6 +50,8 @@ void bdcsvd_method() VERIFY_RAISES_ASSERT(m.bdcSvd().matrixU()); VERIFY_RAISES_ASSERT(m.bdcSvd().matrixV()); VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); } // compare the Singular values returned with Jacobi and Bdc @@ -62,7 +68,7 @@ void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computa if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); } -void test_bdcsvd() +EIGEN_DECLARE_TEST(bdcsvd) { CALL_SUBTEST_3(( 
svd_verify_assert<BDCSVD<Matrix3f> >(Matrix3f()) )); CALL_SUBTEST_4(( svd_verify_assert<BDCSVD<Matrix4d> >(Matrix4d()) )); @@ -104,7 +110,8 @@ void test_bdcsvd() CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) ); // Check that preallocation avoids subsequent mallocs - CALL_SUBTEST_9( svd_preallocate<void>() ); + // Disabled because not supported by BDCSVD + // CALL_SUBTEST_9( svd_preallocate<void>() ); CALL_SUBTEST_2( svd_underoverflow<void>() ); } diff --git a/test/bfloat16_float.cpp b/test/bfloat16_float.cpp new file mode 100644 index 000000000..c3de0b19a --- /dev/null +++ b/test/bfloat16_float.cpp @@ -0,0 +1,378 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include <sstream> +#include <memory> +#include <math.h> + +#include "main.h" + +#include <Eigen/src/Core/arch/Default/BFloat16.h> + +#define VERIFY_BFLOAT16_BITS_EQUAL(h, bits) \ + VERIFY_IS_EQUAL((numext::bit_cast<numext::uint16_t>(h)), (static_cast<numext::uint16_t>(bits))) + +// Make sure it's possible to forward declare Eigen::bfloat16 +namespace Eigen { +struct bfloat16; +} + +using Eigen::bfloat16; + +float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa, + uint32_t low_mantissa) { + float dest; + uint32_t src = (sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa; + memcpy(static_cast<void*>(&dest), + static_cast<const void*>(&src), sizeof(dest)); + return dest; +} + +template<typename T> + void test_roundtrip() { + // Representable T round trip via bfloat16 + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(-std::numeric_limits<T>::infinity()))), -std::numeric_limits<T>::infinity()); + 
VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(std::numeric_limits<T>::infinity()))), std::numeric_limits<T>::infinity()); + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(T(-1.0)))), T(-1.0)); + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(T(-0.5)))), T(-0.5)); + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(T(-0.0)))), T(-0.0)); + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(T(1.0)))), T(1.0)); + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(T(0.5)))), T(0.5)); + VERIFY_IS_EQUAL((internal::cast<bfloat16,T>(internal::cast<T,bfloat16>(T(0.0)))), T(0.0)); +} + +void test_conversion() +{ + using Eigen::bfloat16_impl::__bfloat16_raw; + + // Round-trip casts + VERIFY_IS_EQUAL( + numext::bit_cast<bfloat16>(numext::bit_cast<numext::uint16_t>(bfloat16(1.0f))), + bfloat16(1.0f)); + VERIFY_IS_EQUAL( + numext::bit_cast<bfloat16>(numext::bit_cast<numext::uint16_t>(bfloat16(0.5f))), + bfloat16(0.5f)); + VERIFY_IS_EQUAL( + numext::bit_cast<bfloat16>(numext::bit_cast<numext::uint16_t>(bfloat16(-0.33333f))), + bfloat16(-0.33333f)); + VERIFY_IS_EQUAL( + numext::bit_cast<bfloat16>(numext::bit_cast<numext::uint16_t>(bfloat16(0.0f))), + bfloat16(0.0f)); + + // Conversion from float. + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(1.0f), 0x3f80); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f), 0x3f00); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.33333f), 0x3eab); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3.38e38f), 0x7f7e); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3.40e38f), 0x7f80); // Becomes infinity. + + // Verify round-to-nearest-even behavior. 
+ float val1 = static_cast<float>(bfloat16(__bfloat16_raw(0x3c00))); + float val2 = static_cast<float>(bfloat16(__bfloat16_raw(0x3c01))); + float val3 = static_cast<float>(bfloat16(__bfloat16_raw(0x3c02))); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f * (val1 + val2)), 0x3c00); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f * (val2 + val3)), 0x3c02); + + // Conversion from int. + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(-1), 0xbf80); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0), 0x0000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(1), 0x3f80); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(2), 0x4000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3), 0x4040); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(12), 0x4140); + + // Conversion from bool. + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(false), 0x0000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(true), 0x3f80); + + // Conversion to bool + VERIFY_IS_EQUAL(static_cast<bool>(bfloat16(3)), true); + VERIFY_IS_EQUAL(static_cast<bool>(bfloat16(0.33333f)), true); + VERIFY_IS_EQUAL(bfloat16(-0.0), false); + VERIFY_IS_EQUAL(static_cast<bool>(bfloat16(0.0)), false); + + // Explicit conversion to float. 
+ VERIFY_IS_EQUAL(static_cast<float>(bfloat16(__bfloat16_raw(0x0000))), 0.0f); + VERIFY_IS_EQUAL(static_cast<float>(bfloat16(__bfloat16_raw(0x3f80))), 1.0f); + + // Implicit conversion to float + VERIFY_IS_EQUAL(bfloat16(__bfloat16_raw(0x0000)), 0.0f); + VERIFY_IS_EQUAL(bfloat16(__bfloat16_raw(0x3f80)), 1.0f); + + // Zero representations + VERIFY_IS_EQUAL(bfloat16(0.0f), bfloat16(0.0f)); + VERIFY_IS_EQUAL(bfloat16(-0.0f), bfloat16(0.0f)); + VERIFY_IS_EQUAL(bfloat16(-0.0f), bfloat16(-0.0f)); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.0f), 0x0000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(-0.0f), 0x8000); + + // Default is zero + VERIFY_IS_EQUAL(static_cast<float>(bfloat16()), 0.0f); + + // Representable floats round trip via bfloat16 + test_roundtrip<float>(); + test_roundtrip<double>(); + test_roundtrip<std::complex<float> >(); + test_roundtrip<std::complex<double> >(); + + // Conversion + Array<float,1,100> a; + for (int i = 0; i < 100; i++) a(i) = i + 1.25; + Array<bfloat16,1,100> b = a.cast<bfloat16>(); + Array<float,1,100> c = b.cast<float>(); + for (int i = 0; i < 100; ++i) { + VERIFY_LE(numext::abs(c(i) - a(i)), a(i) / 128); + } + + // Epsilon + VERIFY_LE(1.0f, static_cast<float>((std::numeric_limits<bfloat16>::epsilon)() + bfloat16(1.0f))); + VERIFY_IS_EQUAL(1.0f, static_cast<float>((std::numeric_limits<bfloat16>::epsilon)() / bfloat16(2.0f) + bfloat16(1.0f))); + + // Negate + VERIFY_IS_EQUAL(static_cast<float>(-bfloat16(3.0f)), -3.0f); + VERIFY_IS_EQUAL(static_cast<float>(-bfloat16(-4.5f)), 4.5f); + + +#if !EIGEN_COMP_MSVC + // Visual Studio errors out on divisions by 0 + VERIFY((numext::isnan)(static_cast<float>(bfloat16(0.0 / 0.0)))); + VERIFY((numext::isinf)(static_cast<float>(bfloat16(1.0 / 0.0)))); + VERIFY((numext::isinf)(static_cast<float>(bfloat16(-1.0 / 0.0)))); + + // Visual Studio errors out on divisions by 0 + VERIFY((numext::isnan)(bfloat16(0.0 / 0.0))); + VERIFY((numext::isinf)(bfloat16(1.0 / 0.0))); + VERIFY((numext::isinf)(bfloat16(-1.0 / 0.0))); 
+#endif + + // NaNs and infinities. + VERIFY(!(numext::isinf)(static_cast<float>(bfloat16(3.38e38f)))); // Largest finite number. + VERIFY(!(numext::isnan)(static_cast<float>(bfloat16(0.0f)))); + VERIFY((numext::isinf)(static_cast<float>(bfloat16(__bfloat16_raw(0xff80))))); + VERIFY((numext::isnan)(static_cast<float>(bfloat16(__bfloat16_raw(0xffc0))))); + VERIFY((numext::isinf)(static_cast<float>(bfloat16(__bfloat16_raw(0x7f80))))); + VERIFY((numext::isnan)(static_cast<float>(bfloat16(__bfloat16_raw(0x7fc0))))); + + // Exactly same checks as above, just directly on the bfloat16 representation. + VERIFY(!(numext::isinf)(bfloat16(__bfloat16_raw(0x7bff)))); + VERIFY(!(numext::isnan)(bfloat16(__bfloat16_raw(0x0000)))); + VERIFY((numext::isinf)(bfloat16(__bfloat16_raw(0xff80)))); + VERIFY((numext::isnan)(bfloat16(__bfloat16_raw(0xffc0)))); + VERIFY((numext::isinf)(bfloat16(__bfloat16_raw(0x7f80)))); + VERIFY((numext::isnan)(bfloat16(__bfloat16_raw(0x7fc0)))); + + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(BinaryToFloat(0x0, 0xff, 0x40, 0x0)), 0x7fc0); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(BinaryToFloat(0x1, 0xff, 0x40, 0x0)), 0xffc0); +} + +void test_numtraits() +{ + std::cout << "epsilon = " << NumTraits<bfloat16>::epsilon() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<bfloat16>::epsilon()) << ")" << std::endl; + std::cout << "highest = " << NumTraits<bfloat16>::highest() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<bfloat16>::highest()) << ")" << std::endl; + std::cout << "lowest = " << NumTraits<bfloat16>::lowest() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<bfloat16>::lowest()) << ")" << std::endl; + std::cout << "min = " << (std::numeric_limits<bfloat16>::min)() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>((std::numeric_limits<bfloat16>::min)()) << ")" << std::endl; + std::cout << "denorm min = " << (std::numeric_limits<bfloat16>::denorm_min)() << " (0x" << std::hex << 
numext::bit_cast<numext::uint16_t>((std::numeric_limits<bfloat16>::denorm_min)()) << ")" << std::endl; + std::cout << "infinity = " << NumTraits<bfloat16>::infinity() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<bfloat16>::infinity()) << ")" << std::endl; + std::cout << "quiet nan = " << NumTraits<bfloat16>::quiet_NaN() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<bfloat16>::quiet_NaN()) << ")" << std::endl; + std::cout << "signaling nan = " << std::numeric_limits<bfloat16>::signaling_NaN() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(std::numeric_limits<bfloat16>::signaling_NaN()) << ")" << std::endl; + + VERIFY(NumTraits<bfloat16>::IsSigned); + + VERIFY_IS_EQUAL( + numext::bit_cast<numext::uint16_t>(std::numeric_limits<bfloat16>::infinity()), + numext::bit_cast<numext::uint16_t>(bfloat16(std::numeric_limits<float>::infinity())) ); + // There is no guarantee that casting a 32-bit NaN to bfloat16 has a precise + // bit pattern. We test that it is in fact a NaN, then test the signaling + // bit (msb of significand is 1 for quiet, 0 for signaling). + const numext::uint16_t BFLOAT16_QUIET_BIT = 0x0040; + VERIFY( + (numext::isnan)(std::numeric_limits<bfloat16>::quiet_NaN()) + && (numext::isnan)(bfloat16(std::numeric_limits<float>::quiet_NaN())) + && ((numext::bit_cast<numext::uint16_t>(std::numeric_limits<bfloat16>::quiet_NaN()) & BFLOAT16_QUIET_BIT) > 0) + && ((numext::bit_cast<numext::uint16_t>(bfloat16(std::numeric_limits<float>::quiet_NaN())) & BFLOAT16_QUIET_BIT) > 0) ); + // After a cast to bfloat16, a signaling NaN may become non-signaling. Thus, + // we check that both are NaN, and that only the `numeric_limits` version is + // signaling. 
+ VERIFY( + (numext::isnan)(std::numeric_limits<bfloat16>::signaling_NaN()) + && (numext::isnan)(bfloat16(std::numeric_limits<float>::signaling_NaN())) + && ((numext::bit_cast<numext::uint16_t>(std::numeric_limits<bfloat16>::signaling_NaN()) & BFLOAT16_QUIET_BIT) == 0) ); + + VERIFY( (std::numeric_limits<bfloat16>::min)() > bfloat16(0.f) ); + VERIFY( (std::numeric_limits<bfloat16>::denorm_min)() > bfloat16(0.f) ); + VERIFY_IS_EQUAL( (std::numeric_limits<bfloat16>::denorm_min)()/bfloat16(2), bfloat16(0.f) ); +} + +void test_arithmetic() +{ + VERIFY_IS_EQUAL(static_cast<float>(bfloat16(2) + bfloat16(2)), 4); + VERIFY_IS_EQUAL(static_cast<float>(bfloat16(2) + bfloat16(-2)), 0); + VERIFY_IS_APPROX(static_cast<float>(bfloat16(0.33333f) + bfloat16(0.66667f)), 1.0f); + VERIFY_IS_EQUAL(static_cast<float>(bfloat16(2.0f) * bfloat16(-5.5f)), -11.0f); + VERIFY_IS_APPROX(static_cast<float>(bfloat16(1.0f) / bfloat16(3.0f)), 0.3339f); + VERIFY_IS_EQUAL(static_cast<float>(-bfloat16(4096.0f)), -4096.0f); + VERIFY_IS_EQUAL(static_cast<float>(-bfloat16(-4096.0f)), 4096.0f); +} + +void test_comparison() +{ + VERIFY(bfloat16(1.0f) > bfloat16(0.5f)); + VERIFY(bfloat16(0.5f) < bfloat16(1.0f)); + VERIFY(!(bfloat16(1.0f) < bfloat16(0.5f))); + VERIFY(!(bfloat16(0.5f) > bfloat16(1.0f))); + + VERIFY(!(bfloat16(4.0f) > bfloat16(4.0f))); + VERIFY(!(bfloat16(4.0f) < bfloat16(4.0f))); + + VERIFY(!(bfloat16(0.0f) < bfloat16(-0.0f))); + VERIFY(!(bfloat16(-0.0f) < bfloat16(0.0f))); + VERIFY(!(bfloat16(0.0f) > bfloat16(-0.0f))); + VERIFY(!(bfloat16(-0.0f) > bfloat16(0.0f))); + + VERIFY(bfloat16(0.2f) > bfloat16(-1.0f)); + VERIFY(bfloat16(-1.0f) < bfloat16(0.2f)); + VERIFY(bfloat16(-16.0f) < bfloat16(-15.0f)); + + VERIFY(bfloat16(1.0f) == bfloat16(1.0f)); + VERIFY(bfloat16(1.0f) != bfloat16(2.0f)); + + // Comparisons with NaNs and infinities. 
+#if !EIGEN_COMP_MSVC + // Visual Studio errors out on divisions by 0 + VERIFY(!(bfloat16(0.0 / 0.0) == bfloat16(0.0 / 0.0))); + VERIFY(bfloat16(0.0 / 0.0) != bfloat16(0.0 / 0.0)); + + VERIFY(!(bfloat16(1.0) == bfloat16(0.0 / 0.0))); + VERIFY(!(bfloat16(1.0) < bfloat16(0.0 / 0.0))); + VERIFY(!(bfloat16(1.0) > bfloat16(0.0 / 0.0))); + VERIFY(bfloat16(1.0) != bfloat16(0.0 / 0.0)); + + VERIFY(bfloat16(1.0) < bfloat16(1.0 / 0.0)); + VERIFY(bfloat16(1.0) > bfloat16(-1.0 / 0.0)); +#endif +} + +void test_basic_functions() +{ + VERIFY_IS_EQUAL(static_cast<float>(numext::abs(bfloat16(3.5f))), 3.5f); + VERIFY_IS_EQUAL(static_cast<float>(abs(bfloat16(3.5f))), 3.5f); + VERIFY_IS_EQUAL(static_cast<float>(numext::abs(bfloat16(-3.5f))), 3.5f); + VERIFY_IS_EQUAL(static_cast<float>(abs(bfloat16(-3.5f))), 3.5f); + + VERIFY_IS_EQUAL(static_cast<float>(numext::floor(bfloat16(3.5f))), 3.0f); + VERIFY_IS_EQUAL(static_cast<float>(floor(bfloat16(3.5f))), 3.0f); + VERIFY_IS_EQUAL(static_cast<float>(numext::floor(bfloat16(-3.5f))), -4.0f); + VERIFY_IS_EQUAL(static_cast<float>(floor(bfloat16(-3.5f))), -4.0f); + + VERIFY_IS_EQUAL(static_cast<float>(numext::ceil(bfloat16(3.5f))), 4.0f); + VERIFY_IS_EQUAL(static_cast<float>(ceil(bfloat16(3.5f))), 4.0f); + VERIFY_IS_EQUAL(static_cast<float>(numext::ceil(bfloat16(-3.5f))), -3.0f); + VERIFY_IS_EQUAL(static_cast<float>(ceil(bfloat16(-3.5f))), -3.0f); + + VERIFY_IS_APPROX(static_cast<float>(numext::sqrt(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(sqrt(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(numext::sqrt(bfloat16(4.0f))), 2.0f); + VERIFY_IS_APPROX(static_cast<float>(sqrt(bfloat16(4.0f))), 2.0f); + + VERIFY_IS_APPROX(static_cast<float>(numext::pow(bfloat16(0.0f), bfloat16(1.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(pow(bfloat16(0.0f), bfloat16(1.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(numext::pow(bfloat16(2.0f), bfloat16(2.0f))), 4.0f); + 
VERIFY_IS_APPROX(static_cast<float>(pow(bfloat16(2.0f), bfloat16(2.0f))), 4.0f); + + VERIFY_IS_EQUAL(static_cast<float>(numext::exp(bfloat16(0.0f))), 1.0f); + VERIFY_IS_EQUAL(static_cast<float>(exp(bfloat16(0.0f))), 1.0f); + VERIFY_IS_APPROX(static_cast<float>(numext::exp(bfloat16(EIGEN_PI))), 20.f + static_cast<float>(EIGEN_PI)); + VERIFY_IS_APPROX(static_cast<float>(exp(bfloat16(EIGEN_PI))), 20.f + static_cast<float>(EIGEN_PI)); + + VERIFY_IS_EQUAL(static_cast<float>(numext::expm1(bfloat16(0.0f))), 0.0f); + VERIFY_IS_EQUAL(static_cast<float>(expm1(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(numext::expm1(bfloat16(2.0f))), 6.375f); + VERIFY_IS_APPROX(static_cast<float>(expm1(bfloat16(2.0f))), 6.375f); + + VERIFY_IS_EQUAL(static_cast<float>(numext::log(bfloat16(1.0f))), 0.0f); + VERIFY_IS_EQUAL(static_cast<float>(log(bfloat16(1.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(numext::log(bfloat16(10.0f))), 2.296875f); + VERIFY_IS_APPROX(static_cast<float>(log(bfloat16(10.0f))), 2.296875f); + + VERIFY_IS_EQUAL(static_cast<float>(numext::log1p(bfloat16(0.0f))), 0.0f); + VERIFY_IS_EQUAL(static_cast<float>(log1p(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast<float>(numext::log1p(bfloat16(10.0f))), 2.390625f); + VERIFY_IS_APPROX(static_cast<float>(log1p(bfloat16(10.0f))), 2.390625f); +} + +void test_trigonometric_functions() +{ + VERIFY_IS_APPROX(numext::cos(bfloat16(0.0f)), bfloat16(cosf(0.0f))); + VERIFY_IS_APPROX(cos(bfloat16(0.0f)), bfloat16(cosf(0.0f))); + VERIFY_IS_APPROX(numext::cos(bfloat16(EIGEN_PI)), bfloat16(cosf(EIGEN_PI))); + // VERIFY_IS_APPROX(numext::cos(bfloat16(EIGEN_PI/2)), bfloat16(cosf(EIGEN_PI/2))); + // VERIFY_IS_APPROX(numext::cos(bfloat16(3*EIGEN_PI/2)), bfloat16(cosf(3*EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::cos(bfloat16(3.5f)), bfloat16(cosf(3.5f))); + + VERIFY_IS_APPROX(numext::sin(bfloat16(0.0f)), bfloat16(sinf(0.0f))); + VERIFY_IS_APPROX(sin(bfloat16(0.0f)), bfloat16(sinf(0.0f))); + // 
VERIFY_IS_APPROX(numext::sin(bfloat16(EIGEN_PI)), bfloat16(sinf(EIGEN_PI))); + VERIFY_IS_APPROX(numext::sin(bfloat16(EIGEN_PI/2)), bfloat16(sinf(EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::sin(bfloat16(3*EIGEN_PI/2)), bfloat16(sinf(3*EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::sin(bfloat16(3.5f)), bfloat16(sinf(3.5f))); + + VERIFY_IS_APPROX(numext::tan(bfloat16(0.0f)), bfloat16(tanf(0.0f))); + VERIFY_IS_APPROX(tan(bfloat16(0.0f)), bfloat16(tanf(0.0f))); + // VERIFY_IS_APPROX(numext::tan(bfloat16(EIGEN_PI)), bfloat16(tanf(EIGEN_PI))); + // VERIFY_IS_APPROX(numext::tan(bfloat16(EIGEN_PI/2)), bfloat16(tanf(EIGEN_PI/2))); + // VERIFY_IS_APPROX(numext::tan(bfloat16(3*EIGEN_PI/2)), bfloat16(tanf(3*EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::tan(bfloat16(3.5f)), bfloat16(tanf(3.5f))); +} + +void test_array() +{ + typedef Array<bfloat16,1,Dynamic> ArrayXh; + Index size = internal::random<Index>(1,10); + Index i = internal::random<Index>(0,size-1); + ArrayXh a1 = ArrayXh::Random(size), a2 = ArrayXh::Random(size); + VERIFY_IS_APPROX( a1+a1, bfloat16(2)*a1 ); + VERIFY( (a1.abs() >= bfloat16(0)).all() ); + VERIFY_IS_APPROX( (a1*a1).sqrt(), a1.abs() ); + + VERIFY( ((a1.min)(a2) <= (a1.max)(a2)).all() ); + a1(i) = bfloat16(-10.); + VERIFY_IS_EQUAL( a1.minCoeff(), bfloat16(-10.) ); + a1(i) = bfloat16(10.); + VERIFY_IS_EQUAL( a1.maxCoeff(), bfloat16(10.) 
); + + std::stringstream ss; + ss << a1; +} + +void test_product() +{ + typedef Matrix<bfloat16,Dynamic,Dynamic> MatrixXh; + Index rows = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + Index cols = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + MatrixXh Ah = MatrixXh::Random(rows,depth); + MatrixXh Bh = MatrixXh::Random(depth,cols); + MatrixXh Ch = MatrixXh::Random(rows,cols); + MatrixXf Af = Ah.cast<float>(); + MatrixXf Bf = Bh.cast<float>(); + MatrixXf Cf = Ch.cast<float>(); + VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast<bfloat16>()); +} + +EIGEN_DECLARE_TEST(bfloat16_float) +{ + CALL_SUBTEST(test_numtraits()); + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST(test_conversion()); + CALL_SUBTEST(test_arithmetic()); + CALL_SUBTEST(test_comparison()); + CALL_SUBTEST(test_basic_functions()); + CALL_SUBTEST(test_trigonometric_functions()); + CALL_SUBTEST(test_array()); + CALL_SUBTEST(test_product()); + } +} diff --git a/test/bicgstab.cpp b/test/bicgstab.cpp index 4cc0dd31c..59c4b501c 100644 --- a/test/bicgstab.cpp +++ b/test/bicgstab.cpp @@ -10,11 +10,11 @@ #include "sparse_solver.h" #include <Eigen/IterativeLinearSolvers> -template<typename T, typename I> void test_bicgstab_T() +template<typename T, typename I_> void test_bicgstab_T() { - BiCGSTAB<SparseMatrix<T,0,I>, DiagonalPreconditioner<T> > bicgstab_colmajor_diag; - BiCGSTAB<SparseMatrix<T,0,I>, IdentityPreconditioner > bicgstab_colmajor_I; - BiCGSTAB<SparseMatrix<T,0,I>, IncompleteLUT<T,I> > bicgstab_colmajor_ilut; + BiCGSTAB<SparseMatrix<T,0,I_>, DiagonalPreconditioner<T> > bicgstab_colmajor_diag; + BiCGSTAB<SparseMatrix<T,0,I_>, IdentityPreconditioner > bicgstab_colmajor_I; + BiCGSTAB<SparseMatrix<T,0,I_>, IncompleteLUT<T,I_> > bicgstab_colmajor_ilut; //BiCGSTAB<SparseMatrix<T>, SSORPreconditioner<T> > bicgstab_colmajor_ssor; bicgstab_colmajor_diag.setTolerance(NumTraits<T>::epsilon()*4); @@ -26,7 +26,7 @@ 
template<typename T, typename I> void test_bicgstab_T() //CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ssor) ); } -void test_bicgstab() +EIGEN_DECLARE_TEST(bicgstab) { CALL_SUBTEST_1((test_bicgstab_T<double,int>()) ); CALL_SUBTEST_2((test_bicgstab_T<std::complex<double>, int>())); diff --git a/test/blasutil.cpp b/test/blasutil.cpp new file mode 100644 index 000000000..845a498d6 --- /dev/null +++ b/test/blasutil.cpp @@ -0,0 +1,210 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020 Everton Constantino <everton.constantino@ibm.com> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/ + +#include "main.h" + +// Disable "ignoring attributes on template argument" +// for packet_traits<Packet*> +// => The only workaround would be to wrap _m128 and the likes +// within wrappers. +#if EIGEN_GNUC_AT_LEAST(6,0) + #pragma GCC diagnostic ignored "-Wignored-attributes" +#endif + +#define GET(i,j) (StorageOrder == RowMajor ? (i)*stride + (j) : (i) + (j)*stride) +#define SCATTER(i,j,k) (StorageOrder == RowMajor ? 
((i)+(k))*stride + (j) : (i) + ((j)+(k))*stride) + +template<typename Scalar, typename Packet> +void compare(const Packet& a, const Packet& b) +{ + int pktsz = internal::packet_traits<Scalar>::size; + Scalar *buffA = new Scalar[pktsz]; + Scalar *buffB = new Scalar[pktsz]; + + internal::pstoreu<Scalar, Packet>(buffA, a); + internal::pstoreu<Scalar, Packet>(buffB, b); + + for(int i = 0; i < pktsz; i++) + { + VERIFY_IS_EQUAL(buffA[i], buffB[i]); + } + + delete[] buffA; + delete[] buffB; +} + +template<typename Scalar, int StorageOrder, int n> +struct PacketBlockSet +{ + typedef typename internal::packet_traits<Scalar>::type Packet; + + void setPacketBlock(internal::PacketBlock<Packet,n>& block, Scalar value) + { + for(int idx = 0; idx < n; idx++) + { + block.packet[idx] = internal::pset1<Packet>(value); + } + } + + void comparePacketBlock(Scalar *data, int i, int j, int stride, internal::PacketBlock<Packet, n>& block) + { + for(int idx = 0; idx < n; idx++) + { + Packet line = internal::ploadu<Packet>(data + SCATTER(i,j,idx)); + compare<Scalar, Packet>(block.packet[idx], line); + } + } +}; + +template<typename Scalar, int StorageOrder, int BlockSize> +void run_bdmp_spec_1() +{ + typedef internal::blas_data_mapper<Scalar, int, StorageOrder> BlasDataMapper; + int packetSize = internal::packet_traits<Scalar>::size; + int minSize = std::max<int>(packetSize, BlockSize); + typedef typename internal::packet_traits<Scalar>::type Packet; + + int szm = internal::random<int>(minSize,500), szn = internal::random<int>(minSize,500); + int stride = StorageOrder == RowMajor ? 
szn : szm; + Scalar *d = new Scalar[szn*szm]; + + // Initializing with random entries + for(int i = 0; i < szm*szn; i++) + { + d[i] = internal::random<Scalar>(static_cast<Scalar>(3), static_cast<Scalar>(10)); + } + + BlasDataMapper bdm(d, stride); + + // Testing operator() + for(int i = 0; i < szm; i++) + { + for(int j = 0; j < szn; j++) + { + VERIFY_IS_EQUAL(d[GET(i,j)], bdm(i,j)); + } + } + + // Testing getSubMapper and getLinearMapper + int i0 = internal::random<int>(0,szm-2); + int j0 = internal::random<int>(0,szn-2); + for(int i = i0; i < szm; i++) + { + for(int j = j0; j < szn; j++) + { + const BlasDataMapper& bdmSM = bdm.getSubMapper(i0,j0); + const internal::BlasLinearMapper<Scalar, int, 0>& bdmLM = bdm.getLinearMapper(i0,j0); + + Scalar v = bdmSM(i - i0, j - j0); + Scalar vd = d[GET(i,j)]; + VERIFY_IS_EQUAL(vd, v); + VERIFY_IS_EQUAL(vd, bdmLM(GET(i-i0, j-j0))); + } + } + + // Testing loadPacket + for(int i = 0; i < szm - minSize; i++) + { + for(int j = 0; j < szn - minSize; j++) + { + Packet pktBDM = bdm.template loadPacket<Packet>(i,j); + Packet pktD = internal::ploadu<Packet>(d + GET(i,j)); + + compare<Scalar, Packet>(pktBDM, pktD); + } + } + + // Testing gatherPacket + Scalar *buff = new Scalar[packetSize]; + for(int i = 0; i < szm - minSize; i++) + { + for(int j = 0; j < szn - minSize; j++) + { + Packet p = bdm.template gatherPacket<Packet>(i,j); + internal::pstoreu<Scalar, Packet>(buff, p); + + for(int k = 0; k < packetSize; k++) + { + VERIFY_IS_EQUAL(d[SCATTER(i,j,k)], buff[k]); + } + + } + } + delete[] buff; + + // Testing scatterPacket + for(int i = 0; i < szm - minSize; i++) + { + for(int j = 0; j < szn - minSize; j++) + { + Packet p = internal::pset1<Packet>(static_cast<Scalar>(1)); + bdm.template scatterPacket<Packet>(i,j,p); + for(int k = 0; k < packetSize; k++) + { + VERIFY_IS_EQUAL(d[SCATTER(i,j,k)], static_cast<Scalar>(1)); + } + } + } + + //Testing storePacketBlock + internal::PacketBlock<Packet, BlockSize> block; + + PacketBlockSet<Scalar, 
StorageOrder, BlockSize> pbs; + pbs.setPacketBlock(block, static_cast<Scalar>(2)); + + for(int i = 0; i < szm - minSize; i++) + { + for(int j = 0; j < szn - minSize; j++) + { + bdm.template storePacketBlock<Packet, BlockSize>(i, j, block); + + pbs.comparePacketBlock(d, i, j, stride, block); + } + } + + delete[] d; +} + +template<typename Scalar> +void run_test() +{ + run_bdmp_spec_1<Scalar, RowMajor, 1>(); + run_bdmp_spec_1<Scalar, ColMajor, 1>(); + run_bdmp_spec_1<Scalar, RowMajor, 2>(); + run_bdmp_spec_1<Scalar, ColMajor, 2>(); + run_bdmp_spec_1<Scalar, RowMajor, 4>(); + run_bdmp_spec_1<Scalar, ColMajor, 4>(); + run_bdmp_spec_1<Scalar, RowMajor, 8>(); + run_bdmp_spec_1<Scalar, ColMajor, 8>(); + run_bdmp_spec_1<Scalar, RowMajor, 16>(); + run_bdmp_spec_1<Scalar, ColMajor, 16>(); +} + +EIGEN_DECLARE_TEST(blasutil) +{ + for(int i = 0; i < g_repeat; i++) + { + CALL_SUBTEST_1(run_test<numext::int8_t>()); + CALL_SUBTEST_2(run_test<numext::int16_t>()); + CALL_SUBTEST_3(run_test<numext::int32_t>()); + +// TODO: Replace this by a call to numext::int64_t as soon as we have a way to +// detect the typedef for int64_t on all platforms +#if EIGEN_HAS_CXX11 + CALL_SUBTEST_4(run_test<signed long long>()); +#else + CALL_SUBTEST_4(run_test<signed long>()); +#endif + + CALL_SUBTEST_5(run_test<float_t>()); + CALL_SUBTEST_6(run_test<double_t>()); + CALL_SUBTEST_7(run_test<std::complex<float> >()); + CALL_SUBTEST_8(run_test<std::complex<double> >()); + } +} diff --git a/test/block.cpp b/test/block.cpp index 39565af83..84124aba6 100644 --- a/test/block.cpp +++ b/test/block.cpp @@ -29,15 +29,21 @@ block_real_only(const MatrixType &, Index, Index, Index, Index, const Scalar&) { return Scalar(0); } +// Check at compile-time that T1==T2, and at runtime-time that a==b +template<typename T1,typename T2> +typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type +is_same_block(const T1& a, const T2& b) +{ + return a.isApprox(b); +} template<typename MatrixType> void block(const 
MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType; - typedef Matrix<Scalar, Dynamic, Dynamic> DynamicMatrixType; + typedef Matrix<Scalar, Dynamic, Dynamic, MatrixType::IsRowMajor?RowMajor:ColMajor> DynamicMatrixType; typedef Matrix<Scalar, Dynamic, 1> DynamicVectorType; Index rows = m.rows(); @@ -87,10 +93,9 @@ template<typename MatrixType> void block(const MatrixType& m) m1.block(r1,c1,r2-r1+1,c2-c1+1) = s1 * m2.block(0, 0, r2-r1+1,c2-c1+1); m1.block(r1,c1,r2-r1+1,c2-c1+1)(r2-r1,c2-c1) = m2.block(0, 0, r2-r1+1,c2-c1+1)(0,0); - enum { - BlockRows = 2, - BlockCols = 5 - }; + const Index BlockRows = 2; + const Index BlockCols = 5; + if (rows>=5 && cols>=8) { // test fixed block() as lvalue @@ -106,6 +111,11 @@ template<typename MatrixType> void block(const MatrixType& m) m1.template block<BlockRows,Dynamic>(1,1,BlockRows,BlockCols)(0,3) = m1.template block<2,5>(1,1)(1,2); Matrix<Scalar,Dynamic,Dynamic> b2 = m1.template block<Dynamic,BlockCols>(3,3,2,5); VERIFY_IS_EQUAL(b2, m1.block(3,3,BlockRows,BlockCols)); + + VERIFY(is_same_block(m1.block(3,3,BlockRows,BlockCols), m1.block(3,3,fix<Dynamic>(BlockRows),fix<Dynamic>(BlockCols)))); + VERIFY(is_same_block(m1.template block<BlockRows,Dynamic>(1,1,BlockRows,BlockCols), m1.block(1,1,fix<BlockRows>,BlockCols))); + VERIFY(is_same_block(m1.template block<BlockRows,BlockCols>(1,1,BlockRows,BlockCols), m1.block(1,1,fix<BlockRows>(),fix<BlockCols>))); + VERIFY(is_same_block(m1.template block<BlockRows,BlockCols>(1,1,BlockRows,BlockCols), m1.block(1,1,fix<BlockRows>,fix<BlockCols>(BlockCols)))); } if (rows>2) @@ -131,7 +141,7 @@ template<typename MatrixType> void block(const MatrixType& m) VERIFY(numext::real(ones.col(c1).dot(ones.col(c2))) == RealScalar(rows)); 
VERIFY(numext::real(ones.row(r1).dot(ones.row(r2))) == RealScalar(cols)); - // chekc that linear acccessors works on blocks + // check that linear acccessors works on blocks m1 = m1_copy; if((MatrixType::Flags&RowMajorBit)==0) VERIFY_IS_EQUAL(m1.leftCols(c1).coeff(r1+c1*rows), m1(r1,c1)); @@ -151,9 +161,25 @@ template<typename MatrixType> void block(const MatrixType& m) // expressions without direct access VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,rows-r1,cols-c1).block(r2-r1,c2-c1,rows-r2,cols-c2)) , ((m1+m2).block(r2,c2,rows-r2,cols-c2)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)) ); + VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).eval().row(r1).segment(c1,c2-c1+1)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).col(0)) , ((m1+m2).col(c1).segment(r1,r2-r1+1)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).transpose().col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() ); VERIFY_IS_APPROX( ((m1+m2).transpose().block(c1,r1,c2-c1+1,r2-r1+1).col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() ); + VERIFY_IS_APPROX( ((m1+m2).template block<Dynamic,1>(r1,c1,r2-r1+1,1)) , ((m1+m2).eval().col(c1).eval().segment(r1,r2-r1+1)) ); + VERIFY_IS_APPROX( ((m1+m2).template block<1,Dynamic>(r1,c1,1,c2-c1+1)) , ((m1+m2).eval().row(r1).eval().segment(c1,c2-c1+1)) ); + VERIFY_IS_APPROX( ((m1+m2).transpose().template block<1,Dynamic>(c1,r1,1,r2-r1+1)) , ((m1+m2).eval().col(c1).eval().segment(r1,r2-r1+1)).transpose() ); + VERIFY_IS_APPROX( (m1+m2).row(r1).eval(), (m1+m2).eval().row(r1) ); + VERIFY_IS_APPROX( (m1+m2).adjoint().col(r1).eval(), (m1+m2).adjoint().eval().col(r1) ); + VERIFY_IS_APPROX( (m1+m2).adjoint().row(c1).eval(), (m1+m2).adjoint().eval().row(c1) ); + VERIFY_IS_APPROX( (m1*1).row(r1).segment(c1,c2-c1+1).eval(), m1.row(r1).eval().segment(c1,c2-c1+1).eval() ); + VERIFY_IS_APPROX( 
m1.col(c1).reverse().segment(r1,r2-r1+1).eval(),m1.col(c1).reverse().eval().segment(r1,r2-r1+1).eval() ); + + VERIFY_IS_APPROX( (m1*1).topRows(r1), m1.topRows(r1) ); + VERIFY_IS_APPROX( (m1*1).leftCols(c1), m1.leftCols(c1) ); + VERIFY_IS_APPROX( (m1*1).transpose().topRows(c1), m1.transpose().topRows(c1) ); + VERIFY_IS_APPROX( (m1*1).transpose().leftCols(r1), m1.transpose().leftCols(r1) ); + VERIFY_IS_APPROX( (m1*1).transpose().middleRows(c1,c2-c1+1), m1.transpose().middleRows(c1,c2-c1+1) ); + VERIFY_IS_APPROX( (m1*1).transpose().middleCols(r1,r2-r1+1), m1.transpose().middleCols(r1,r2-r1+1) ); // evaluation into plain matrices from expressions with direct access (stress MapBase) DynamicMatrixType dm; @@ -194,13 +220,29 @@ template<typename MatrixType> void block(const MatrixType& m) VERIFY_RAISES_ASSERT( m1.array() *= m1.col(0).array() ); VERIFY_RAISES_ASSERT( m1.array() /= m1.col(0).array() ); } + + VERIFY_IS_EQUAL( m1.template subVector<Horizontal>(r1), m1.row(r1) ); + VERIFY_IS_APPROX( (m1+m1).template subVector<Horizontal>(r1), (m1+m1).row(r1) ); + VERIFY_IS_EQUAL( m1.template subVector<Vertical>(c1), m1.col(c1) ); + VERIFY_IS_APPROX( (m1+m1).template subVector<Vertical>(c1), (m1+m1).col(c1) ); + VERIFY_IS_EQUAL( m1.template subVectors<Horizontal>(), m1.rows() ); + VERIFY_IS_EQUAL( m1.template subVectors<Vertical>(), m1.cols() ); + + if (rows>=2 || cols>=2) { + VERIFY_IS_EQUAL( int(m1.middleCols(0,0).IsRowMajor), int(m1.IsRowMajor) ); + VERIFY_IS_EQUAL( m1.middleCols(0,0).outerSize(), m1.IsRowMajor ? rows : 0); + VERIFY_IS_EQUAL( m1.middleCols(0,0).innerSize(), m1.IsRowMajor ? 0 : rows); + + VERIFY_IS_EQUAL( int(m1.middleRows(0,0).IsRowMajor), int(m1.IsRowMajor) ); + VERIFY_IS_EQUAL( m1.middleRows(0,0).outerSize(), m1.IsRowMajor ? 0 : cols); + VERIFY_IS_EQUAL( m1.middleRows(0,0).innerSize(), m1.IsRowMajor ? 
cols : 0); + } } template<typename MatrixType> void compare_using_data_and_stride(const MatrixType& m) { - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); Index size = m.size(); @@ -234,7 +276,6 @@ void compare_using_data_and_stride(const MatrixType& m) template<typename MatrixType> void data_and_stride(const MatrixType& m) { - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); @@ -252,15 +293,18 @@ void data_and_stride(const MatrixType& m) compare_using_data_and_stride(m1.col(c1).transpose()); } -void test_block() +EIGEN_DECLARE_TEST(block) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( block(Matrix<float, 1, 1>()) ); + CALL_SUBTEST_1( block(Matrix<float, 1, Dynamic>(internal::random(2,50))) ); + CALL_SUBTEST_1( block(Matrix<float, Dynamic, 1>(internal::random(2,50))) ); CALL_SUBTEST_2( block(Matrix4d()) ); - CALL_SUBTEST_3( block(MatrixXcf(3, 3)) ); - CALL_SUBTEST_4( block(MatrixXi(8, 12)) ); - CALL_SUBTEST_5( block(MatrixXcd(20, 20)) ); - CALL_SUBTEST_6( block(MatrixXf(20, 20)) ); + CALL_SUBTEST_3( block(MatrixXcf(internal::random(2,50), internal::random(2,50))) ); + CALL_SUBTEST_4( block(MatrixXi(internal::random(2,50), internal::random(2,50))) ); + CALL_SUBTEST_5( block(MatrixXcd(internal::random(2,50), internal::random(2,50))) ); + CALL_SUBTEST_6( block(MatrixXf(internal::random(2,50), internal::random(2,50))) ); + CALL_SUBTEST_7( block(Matrix<int,Dynamic,Dynamic,RowMajor>(internal::random(2,50), internal::random(2,50))) ); CALL_SUBTEST_8( block(Matrix<float,Dynamic,4>(3, 4)) ); diff --git a/test/boostmultiprec.cpp b/test/boostmultiprec.cpp index e06e9bdaf..7c79ded23 100644 --- a/test/boostmultiprec.cpp +++ b/test/boostmultiprec.cpp @@ -55,6 +55,10 @@ #include "bdcsvd.cpp" #endif +#ifdef EIGEN_TEST_PART_11 +#include "simplicial_cholesky.cpp" +#endif + #include <Eigen/Dense> #undef min @@ -62,7 +66,9 @@ #undef isnan #undef isinf #undef isfinite +#undef I +#include 
<boost/serialization/nvp.hpp> #include <boost/multiprecision/cpp_dec_float.hpp> #include <boost/multiprecision/number.hpp> #include <boost/math/special_functions.hpp> @@ -141,7 +147,7 @@ namespace Eigen { } -void test_boostmultiprec() +EIGEN_DECLARE_TEST(boostmultiprec) { typedef Matrix<Real,Dynamic,Dynamic> Mat; typedef Matrix<std::complex<Real>,Dynamic,Dynamic> MatC; @@ -152,7 +158,7 @@ void test_boostmultiprec() std::cout << "NumTraits<Real>::highest() = " << NumTraits<Real>::highest() << std::endl; std::cout << "NumTraits<Real>::digits10() = " << NumTraits<Real>::digits10() << std::endl; - // chekc stream output + // check stream output { Mat A(10,10); A.setRandom(); @@ -197,5 +203,6 @@ void test_boostmultiprec() CALL_SUBTEST_9(( jacobisvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); CALL_SUBTEST_10(( bdcsvd(Mat(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); -} + CALL_SUBTEST_11(( test_simplicial_cholesky_T<Real,int,ColMajor>() )); +} diff --git a/test/cholesky.cpp b/test/cholesky.cpp index 8ad5ac639..0b1a7b45b 100644 --- a/test/cholesky.cpp +++ b/test/cholesky.cpp @@ -7,18 +7,16 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-#ifndef EIGEN_NO_ASSERTION_CHECKING -#define EIGEN_NO_ASSERTION_CHECKING -#endif - #define TEST_ENABLE_TEMPORARY_TRACKING #include "main.h" #include <Eigen/Cholesky> #include <Eigen/QR> +#include "solverbase.h" template<typename MatrixType, int UpLo> typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { + if(m.cols()==0) return typename MatrixType::RealScalar(0); MatrixType symm = m.template selfadjointView<UpLo>(); return symm.cwiseAbs().colwise().sum().maxCoeff(); } @@ -57,7 +55,6 @@ template<typename MatrixType,template <typename,int> class CholType> void test_c template<typename MatrixType> void cholesky(const MatrixType& m) { - typedef typename MatrixType::Index Index; /* this test covers the following files: LLT.h LDLT.h */ @@ -81,15 +78,17 @@ template<typename MatrixType> void cholesky(const MatrixType& m) } { + STATIC_CHECK(( internal::is_same<typename LLT<MatrixType,Lower>::StorageIndex,int>::value )); + STATIC_CHECK(( internal::is_same<typename LLT<MatrixType,Upper>::StorageIndex,int>::value )); + SquareMatrixType symmUp = symm.template triangularView<Upper>(); SquareMatrixType symmLo = symm.template triangularView<Lower>(); LLT<SquareMatrixType,Lower> chollo(symmLo); VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); - vecX = chollo.solve(vecB); - VERIFY_IS_APPROX(symm * vecX, vecB); - matX = chollo.solve(matB); - VERIFY_IS_APPROX(symm * matX, matB); + + check_solverbase<VectorType, VectorType>(symm, chollo, rows, rows, 1); + check_solverbase<MatrixType, MatrixType>(symm, chollo, rows, cols, rows); const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols)); RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) / @@ -97,7 +96,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m) RealScalar rcond_est = chollo.rcond(); // Verify that the estimated condition number is within a factor of 10 of the // truth. 
- VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); // test the upper mode LLT<SquareMatrixType,Upper> cholup(symmUp); @@ -113,12 +112,12 @@ template<typename MatrixType> void cholesky(const MatrixType& m) rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Upper>(symmUp)) / matrix_l1_norm<MatrixType, Upper>(symmUp_inverse); rcond_est = cholup.rcond(); - VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); MatrixType neg = -symmLo; chollo.compute(neg); - VERIFY(chollo.info()==NumericalIssue); + VERIFY(neg.size()==0 || chollo.info()==NumericalIssue); VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU())); VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL())); @@ -143,6 +142,9 @@ template<typename MatrixType> void cholesky(const MatrixType& m) // LDLT { + STATIC_CHECK(( internal::is_same<typename LDLT<MatrixType,Lower>::StorageIndex,int>::value )); + STATIC_CHECK(( internal::is_same<typename LDLT<MatrixType,Upper>::StorageIndex,int>::value )); + int sign = internal::random<int>()%2 ? 
1 : -1; if(sign == -1) @@ -156,10 +158,9 @@ template<typename MatrixType> void cholesky(const MatrixType& m) LDLT<SquareMatrixType,Lower> ldltlo(symmLo); VERIFY(ldltlo.info()==Success); VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix()); - vecX = ldltlo.solve(vecB); - VERIFY_IS_APPROX(symm * vecX, vecB); - matX = ldltlo.solve(matB); - VERIFY_IS_APPROX(symm * matX, matB); + + check_solverbase<VectorType, VectorType>(symm, ldltlo, rows, rows, 1); + check_solverbase<MatrixType, MatrixType>(symm, ldltlo, rows, cols, rows); const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols)); RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) / @@ -167,7 +168,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m) RealScalar rcond_est = ldltlo.rcond(); // Verify that the estimated condition number is within a factor of 10 of the // truth. - VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); LDLT<SquareMatrixType,Upper> ldltup(symmUp); @@ -184,7 +185,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m) rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Upper>(symmUp)) / matrix_l1_norm<MatrixType, Upper>(symmUp_inverse); rcond_est = ldltup.rcond(); - VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU())); VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL())); @@ -289,8 +290,6 @@ template<typename MatrixType> void cholesky_cplx(const MatrixType& m) // test mixing real/scalar types - typedef typename MatrixType::Index Index; - Index rows = m.rows(); Index cols = m.cols(); @@ -315,10 +314,9 @@ template<typename MatrixType> void cholesky_cplx(const MatrixType& m) LLT<RealMatrixType,Lower> chollo(symmLo); VERIFY_IS_APPROX(symm, 
chollo.reconstructedMatrix()); - vecX = chollo.solve(vecB); - VERIFY_IS_APPROX(symm * vecX, vecB); -// matX = chollo.solve(matB); -// VERIFY_IS_APPROX(symm * matX, matB); + + check_solverbase<VectorType, VectorType>(symm, chollo, rows, rows, 1); + //check_solverbase<MatrixType, MatrixType>(symm, chollo, rows, cols, rows); } // LDLT @@ -335,10 +333,9 @@ template<typename MatrixType> void cholesky_cplx(const MatrixType& m) LDLT<RealMatrixType,Lower> ldltlo(symmLo); VERIFY(ldltlo.info()==Success); VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix()); - vecX = ldltlo.solve(vecB); - VERIFY_IS_APPROX(symm * vecX, vecB); -// matX = ldltlo.solve(matB); -// VERIFY_IS_APPROX(symm * matX, matB); + + check_solverbase<VectorType, VectorType>(symm, ldltlo, rows, rows, 1); + //check_solverbase<MatrixType, MatrixType>(symm, ldltlo, rows, cols, rows); } } @@ -373,6 +370,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m) VERIFY(ldlt.info()==Success); VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << 1, 2, 2, 1; @@ -380,6 +378,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m) VERIFY(ldlt.info()==Success); VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << 0, 0, 0, 0; @@ -387,6 +386,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m) VERIFY(ldlt.info()==Success); VERIFY(ldlt.isNegative()); VERIFY(ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << 0, 0, 0, 1; @@ -394,6 +394,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m) VERIFY(ldlt.info()==Success); VERIFY(!ldlt.isNegative()); VERIFY(ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << -1, 0, 0, 0; @@ -401,6 +402,7 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m) 
VERIFY(ldlt.info()==Success); VERIFY(ldlt.isNegative()); VERIFY(!ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } } @@ -452,6 +454,18 @@ void cholesky_faillure_cases() VERIFY(ldlt.info()==NumericalIssue); VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); } + + // bug 1479 + { + mat.resize(4,4); + mat << 1, 2, 0, 1, + 2, 4, 0, 2, + 0, 0, 0, 1, + 1, 2, 1, 1; + ldlt.compute(mat); + VERIFY(ldlt.info()==NumericalIssue); + VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); + } } template<typename MatrixType> void cholesky_verify_assert() @@ -462,19 +476,23 @@ template<typename MatrixType> void cholesky_verify_assert() VERIFY_RAISES_ASSERT(llt.matrixL()) VERIFY_RAISES_ASSERT(llt.matrixU()) VERIFY_RAISES_ASSERT(llt.solve(tmp)) - VERIFY_RAISES_ASSERT(llt.solveInPlace(&tmp)) + VERIFY_RAISES_ASSERT(llt.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(llt.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(llt.solveInPlace(tmp)) LDLT<MatrixType> ldlt; VERIFY_RAISES_ASSERT(ldlt.matrixL()) - VERIFY_RAISES_ASSERT(ldlt.permutationP()) + VERIFY_RAISES_ASSERT(ldlt.transpositionsP()) VERIFY_RAISES_ASSERT(ldlt.vectorD()) VERIFY_RAISES_ASSERT(ldlt.isPositive()) VERIFY_RAISES_ASSERT(ldlt.isNegative()) VERIFY_RAISES_ASSERT(ldlt.solve(tmp)) - VERIFY_RAISES_ASSERT(ldlt.solveInPlace(&tmp)) + VERIFY_RAISES_ASSERT(ldlt.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(ldlt.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(ldlt.solveInPlace(tmp)) } -void test_cholesky() +EIGEN_DECLARE_TEST(cholesky) { int s = 0; for(int i = 0; i < g_repeat; i++) { @@ -493,6 +511,11 @@ void test_cholesky() CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } + // empty matrix, regression test for Bug 785: + CALL_SUBTEST_2( cholesky(MatrixXd(0,0)) ); + + // This does not work yet: + // CALL_SUBTEST_2( cholesky(Matrix<double,0,0>()) ); CALL_SUBTEST_4( cholesky_verify_assert<Matrix3f>() ); CALL_SUBTEST_7( cholesky_verify_assert<Matrix3d>() ); diff --git 
a/test/cholmod_support.cpp b/test/cholmod_support.cpp index a7eda28f7..89b9cf41e 100644 --- a/test/cholmod_support.cpp +++ b/test/cholmod_support.cpp @@ -12,21 +12,21 @@ #include <Eigen/CholmodSupport> -template<typename T> void test_cholmod_T() +template<typename SparseType> void test_cholmod_ST() { - CholmodDecomposition<SparseMatrix<T>, Lower> g_chol_colmajor_lower; g_chol_colmajor_lower.setMode(CholmodSupernodalLLt); - CholmodDecomposition<SparseMatrix<T>, Upper> g_chol_colmajor_upper; g_chol_colmajor_upper.setMode(CholmodSupernodalLLt); - CholmodDecomposition<SparseMatrix<T>, Lower> g_llt_colmajor_lower; g_llt_colmajor_lower.setMode(CholmodSimplicialLLt); - CholmodDecomposition<SparseMatrix<T>, Upper> g_llt_colmajor_upper; g_llt_colmajor_upper.setMode(CholmodSimplicialLLt); - CholmodDecomposition<SparseMatrix<T>, Lower> g_ldlt_colmajor_lower; g_ldlt_colmajor_lower.setMode(CholmodLDLt); - CholmodDecomposition<SparseMatrix<T>, Upper> g_ldlt_colmajor_upper; g_ldlt_colmajor_upper.setMode(CholmodLDLt); + CholmodDecomposition<SparseType, Lower> g_chol_colmajor_lower; g_chol_colmajor_lower.setMode(CholmodSupernodalLLt); + CholmodDecomposition<SparseType, Upper> g_chol_colmajor_upper; g_chol_colmajor_upper.setMode(CholmodSupernodalLLt); + CholmodDecomposition<SparseType, Lower> g_llt_colmajor_lower; g_llt_colmajor_lower.setMode(CholmodSimplicialLLt); + CholmodDecomposition<SparseType, Upper> g_llt_colmajor_upper; g_llt_colmajor_upper.setMode(CholmodSimplicialLLt); + CholmodDecomposition<SparseType, Lower> g_ldlt_colmajor_lower; g_ldlt_colmajor_lower.setMode(CholmodLDLt); + CholmodDecomposition<SparseType, Upper> g_ldlt_colmajor_upper; g_ldlt_colmajor_upper.setMode(CholmodLDLt); - CholmodSupernodalLLT<SparseMatrix<T>, Lower> chol_colmajor_lower; - CholmodSupernodalLLT<SparseMatrix<T>, Upper> chol_colmajor_upper; - CholmodSimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower; - CholmodSimplicialLLT<SparseMatrix<T>, Upper> llt_colmajor_upper; - 
CholmodSimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower; - CholmodSimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper; + CholmodSupernodalLLT<SparseType, Lower> chol_colmajor_lower; + CholmodSupernodalLLT<SparseType, Upper> chol_colmajor_upper; + CholmodSimplicialLLT<SparseType, Lower> llt_colmajor_lower; + CholmodSimplicialLLT<SparseType, Upper> llt_colmajor_upper; + CholmodSimplicialLDLT<SparseType, Lower> ldlt_colmajor_lower; + CholmodSimplicialLDLT<SparseType, Upper> ldlt_colmajor_upper; check_sparse_spd_solving(g_chol_colmajor_lower); check_sparse_spd_solving(g_chol_colmajor_upper); @@ -50,8 +50,20 @@ template<typename T> void test_cholmod_T() check_sparse_spd_determinant(ldlt_colmajor_upper); } -void test_cholmod_support() +template<typename T, int flags, typename IdxType> void test_cholmod_T() { - CALL_SUBTEST_1(test_cholmod_T<double>()); - CALL_SUBTEST_2(test_cholmod_T<std::complex<double> >()); + test_cholmod_ST<SparseMatrix<T, flags, IdxType> >(); +} + +EIGEN_DECLARE_TEST(cholmod_support) +{ + CALL_SUBTEST_11( (test_cholmod_T<double , ColMajor, int >()) ); + CALL_SUBTEST_12( (test_cholmod_T<double , ColMajor, long>()) ); + CALL_SUBTEST_13( (test_cholmod_T<double , RowMajor, int >()) ); + CALL_SUBTEST_14( (test_cholmod_T<double , RowMajor, long>()) ); + CALL_SUBTEST_21( (test_cholmod_T<std::complex<double>, ColMajor, int >()) ); + CALL_SUBTEST_22( (test_cholmod_T<std::complex<double>, ColMajor, long>()) ); + // TODO complex row-major matrices do not work at the moment: + // CALL_SUBTEST_23( (test_cholmod_T<std::complex<double>, RowMajor, int >()) ); + // CALL_SUBTEST_24( (test_cholmod_T<std::complex<double>, RowMajor, long>()) ); } diff --git a/test/commainitializer.cpp b/test/commainitializer.cpp index 9844adbd2..eb275be9c 100644 --- a/test/commainitializer.cpp +++ b/test/commainitializer.cpp @@ -34,8 +34,14 @@ void test_blocks() if(N1 > 0) { - VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22)); - 
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22)); + if(M1 > 0) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22)); + } + if(M2 > 0) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22)); + } } else { @@ -49,24 +55,25 @@ void test_blocks() } -template<int N> +template<int depth, int N=0> struct test_block_recursion { static void run() { - test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>(); - test_block_recursion<N-1>::run(); + test_block_recursion<depth-1, N>::run(); + test_block_recursion<depth-1, N + (1 << (depth-1))>::run(); } }; -template<> -struct test_block_recursion<-1> +template<int N> +struct test_block_recursion<0,N> { - static void run() { } + static void run() { + test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>(); + } }; -void test_commainitializer() -{ +void test_basics() { Matrix3d m3; Matrix4d m4; @@ -99,8 +106,13 @@ void test_commainitializer() 4, 5, 6, vec[2].transpose(); VERIFY_IS_APPROX(m3, ref); +} + +EIGEN_DECLARE_TEST(commainitializer) +{ + CALL_SUBTEST_1(test_basics()); // recursively test all block-sizes from 0 to 3: - test_block_recursion<(1<<8) - 1>(); + CALL_SUBTEST_2(test_block_recursion<8>::run()); } diff --git a/test/conjugate_gradient.cpp b/test/conjugate_gradient.cpp index 9622fd86d..b076a126b 100644 --- a/test/conjugate_gradient.cpp +++ b/test/conjugate_gradient.cpp @@ -10,9 +10,9 @@ #include "sparse_solver.h" #include <Eigen/IterativeLinearSolvers> -template<typename T, typename I> void test_conjugate_gradient_T() +template<typename T, typename I_> void test_conjugate_gradient_T() { - typedef SparseMatrix<T,0,I> SparseMatrixType; + typedef SparseMatrix<T,0,I_> SparseMatrixType; ConjugateGradient<SparseMatrixType, Lower > cg_colmajor_lower_diag; ConjugateGradient<SparseMatrixType, Upper > cg_colmajor_upper_diag; ConjugateGradient<SparseMatrixType, Lower|Upper> cg_colmajor_loup_diag; @@ -26,7 +26,7 @@ template<typename T, typename I> void test_conjugate_gradient_T() 
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) ); } -void test_conjugate_gradient() +EIGEN_DECLARE_TEST(conjugate_gradient) { CALL_SUBTEST_1(( test_conjugate_gradient_T<double,int>() )); CALL_SUBTEST_2(( test_conjugate_gradient_T<std::complex<double>, int>() )); diff --git a/test/conservative_resize.cpp b/test/conservative_resize.cpp index 498421b4c..d48eb126f 100644 --- a/test/conservative_resize.cpp +++ b/test/conservative_resize.cpp @@ -10,6 +10,7 @@ #include "main.h" #include <Eigen/Core> +#include "AnnoyingScalar.h" using namespace Eigen; @@ -17,7 +18,6 @@ template <typename Scalar, int Storage> void run_matrix_tests() { typedef Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Storage> MatrixType; - typedef typename MatrixType::Index Index; MatrixType m, n; @@ -110,7 +110,33 @@ void run_vector_tests() } } -void test_conservative_resize() +// Basic memory leak check with a non-copyable scalar type +template<int> void noncopyable() +{ + typedef Eigen::Matrix<AnnoyingScalar,Dynamic,1> VectorType; + typedef Eigen::Matrix<AnnoyingScalar,Dynamic,Dynamic> MatrixType; + + { +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW + AnnoyingScalar::dont_throw = true; +#endif + int n = 50; + VectorType v0(n), v1(n); + MatrixType m0(n,n), m1(n,n), m2(n,n); + v0.setOnes(); v1.setOnes(); + m0.setOnes(); m1.setOnes(); m2.setOnes(); + VERIFY(m0==m1); + m0.conservativeResize(2*n,2*n); + VERIFY(m0.topLeftCorner(n,n) == m1); + + VERIFY(v0.head(n) == v1); + v0.conservativeResize(2*n); + VERIFY(v0.head(n) == v1); + } + VERIFY(AnnoyingScalar::instances==0 && "global memory leak detected in noncopyable"); +} + +EIGEN_DECLARE_TEST(conservative_resize) { for(int i=0; i<g_repeat; ++i) { @@ -123,12 +149,19 @@ void test_conservative_resize() CALL_SUBTEST_4((run_matrix_tests<std::complex<float>, Eigen::RowMajor>())); CALL_SUBTEST_4((run_matrix_tests<std::complex<float>, Eigen::ColMajor>())); CALL_SUBTEST_5((run_matrix_tests<std::complex<double>, Eigen::RowMajor>())); - 
CALL_SUBTEST_6((run_matrix_tests<std::complex<double>, Eigen::ColMajor>())); + CALL_SUBTEST_5((run_matrix_tests<std::complex<double>, Eigen::ColMajor>())); + CALL_SUBTEST_1((run_matrix_tests<int, Eigen::RowMajor | Eigen::DontAlign>())); CALL_SUBTEST_1((run_vector_tests<int>())); CALL_SUBTEST_2((run_vector_tests<float>())); CALL_SUBTEST_3((run_vector_tests<double>())); CALL_SUBTEST_4((run_vector_tests<std::complex<float> >())); CALL_SUBTEST_5((run_vector_tests<std::complex<double> >())); + +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW + AnnoyingScalar::dont_throw = true; +#endif + CALL_SUBTEST_6(( run_vector_tests<AnnoyingScalar>() )); + CALL_SUBTEST_6(( noncopyable<0>() )); } } diff --git a/test/constructor.cpp b/test/constructor.cpp index eec9e2192..ffd5e802a 100644 --- a/test/constructor.cpp +++ b/test/constructor.cpp @@ -20,6 +20,8 @@ template<typename MatrixType> struct Wrapper inline operator MatrixType& () { return m_mat; } }; +enum my_sizes { M = 12, N = 7}; + template<typename MatrixType> void ctor_init1(const MatrixType& m) { // Check logic in PlainObjectBase::_init1 @@ -37,7 +39,7 @@ template<typename MatrixType> void ctor_init1(const MatrixType& m) } -void test_constructor() +EIGEN_DECLARE_TEST(constructor) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( ctor_init1(Matrix<float, 1, 1>()) ); @@ -81,4 +83,16 @@ void test_constructor() Array<float,3,3> a(123); VERIFY_IS_EQUAL(a(4), 123.f); } + { + MatrixXi m1(M,N); + VERIFY_IS_EQUAL(m1.rows(),M); + VERIFY_IS_EQUAL(m1.cols(),N); + ArrayXXi a1(M,N); + VERIFY_IS_EQUAL(a1.rows(),M); + VERIFY_IS_EQUAL(a1.cols(),N); + VectorXi v1(M); + VERIFY_IS_EQUAL(v1.size(),M); + ArrayXi a2(M); + VERIFY_IS_EQUAL(a2.size(),M); + } } diff --git a/test/corners.cpp b/test/corners.cpp index 3c64c32a1..73342a8dd 100644 --- a/test/corners.cpp +++ b/test/corners.cpp @@ -15,7 +15,6 @@ template<typename MatrixType> void corners(const MatrixType& m) { - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = 
m.cols(); @@ -102,7 +101,7 @@ template<typename MatrixType, int CRows, int CCols, int SRows, int SCols> void c VERIFY_IS_EQUAL((const_matrix.template rightCols<c>()), (const_matrix.template block<rows,c>(0,cols-c))); } -void test_corners() +EIGEN_DECLARE_TEST(corners) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( corners(Matrix<float, 1, 1>()) ); diff --git a/test/ctorleak.cpp b/test/ctorleak.cpp index c158f5e4e..73904176b 100644 --- a/test/ctorleak.cpp +++ b/test/ctorleak.cpp @@ -8,7 +8,7 @@ struct Foo static Index object_limit; int dummy; - Foo() + Foo() : dummy(0) { #ifdef EIGEN_EXCEPTIONS // TODO: Is this the correct way to handle this? @@ -33,26 +33,37 @@ Index Foo::object_limit = 0; #undef EIGEN_TEST_MAX_SIZE #define EIGEN_TEST_MAX_SIZE 3 -void test_ctorleak() +EIGEN_DECLARE_TEST(ctorleak) { typedef Matrix<Foo, Dynamic, Dynamic> MatrixX; typedef Matrix<Foo, Dynamic, 1> VectorX; + Foo::object_count = 0; for(int i = 0; i < g_repeat; i++) { Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE); - Foo::object_limit = internal::random<Index>(0, rows*cols - 2); + Foo::object_limit = rows*cols; + { + MatrixX r(rows, cols); + Foo::object_limit = r.size()+internal::random<Index>(0, rows*cols - 2); std::cout << "object_limit =" << Foo::object_limit << std::endl; #ifdef EIGEN_EXCEPTIONS try { #endif - std::cout << "\nMatrixX m(" << rows << ", " << cols << ");\n"; - MatrixX m(rows, cols); + if(internal::random<bool>()) { + std::cout << "\nMatrixX m(" << rows << ", " << cols << ");\n"; + MatrixX m(rows, cols); + } + else { + std::cout << "\nMatrixX m(r);\n"; + MatrixX m(r); + } #ifdef EIGEN_EXCEPTIONS VERIFY(false); // not reached if exceptions are enabled } catch (const Foo::Fail&) { /* ignore */ } #endif + } VERIFY_IS_EQUAL(Index(0), Foo::object_count); { @@ -66,4 +77,5 @@ void test_ctorleak() } VERIFY_IS_EQUAL(Index(0), Foo::object_count); } + std::cout << "\n"; } diff --git a/test/cuda_basic.cu 
b/test/cuda_basic.cu deleted file mode 100644 index cb2e4167a..000000000 --- a/test/cuda_basic.cu +++ /dev/null @@ -1,173 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2015-2016 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -// workaround issue between gcc >= 4.7 and cuda 5.5 -#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7) - #undef _GLIBCXX_ATOMIC_BUILTINS - #undef _GLIBCXX_USE_INT128 -#endif - -#define EIGEN_TEST_NO_LONGDOUBLE -#define EIGEN_TEST_NO_COMPLEX -#define EIGEN_TEST_FUNC cuda_basic -#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int - -#include <math_constants.h> -#include <cuda.h> -#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 -#include <cuda_fp16.h> -#endif -#include "main.h" -#include "cuda_common.h" - -// Check that dense modules can be properly parsed by nvcc -#include <Eigen/Dense> - -// struct Foo{ -// EIGEN_DEVICE_FUNC -// void operator()(int i, const float* mats, float* vecs) const { -// using namespace Eigen; -// // Matrix3f M(data); -// // Vector3f x(data+9); -// // Map<Vector3f>(data+9) = M.inverse() * x; -// Matrix3f M(mats+i/16); -// Vector3f x(vecs+i*3); -// // using std::min; -// // using std::sqrt; -// Map<Vector3f>(vecs+i*3) << x.minCoeff(), 1, 2;// / x.dot(x);//(M.inverse() * x) / x.x(); -// //x = x*2 + x.y() * x + x * x.maxCoeff() - x / x.sum(); -// } -// }; - -template<typename T> -struct coeff_wise { - EIGEN_DEVICE_FUNC - void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const - { - using namespace Eigen; - T x1(in+i); - T x2(in+i+1); - T x3(in+i+2); - Map<T> res(out+i*T::MaxSizeAtCompileTime); - - res.array() += (in[0] * x1 + x2).array() * x3.array(); - } -}; - -template<typename T> -struct replicate { - 
EIGEN_DEVICE_FUNC - void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const - { - using namespace Eigen; - T x1(in+i); - int step = x1.size() * 4; - int stride = 3 * step; - - typedef Map<Array<typename T::Scalar,Dynamic,Dynamic> > MapType; - MapType(out+i*stride+0*step, x1.rows()*2, x1.cols()*2) = x1.replicate(2,2); - MapType(out+i*stride+1*step, x1.rows()*3, x1.cols()) = in[i] * x1.colwise().replicate(3); - MapType(out+i*stride+2*step, x1.rows(), x1.cols()*3) = in[i] * x1.rowwise().replicate(3); - } -}; - -template<typename T> -struct redux { - EIGEN_DEVICE_FUNC - void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const - { - using namespace Eigen; - int N = 10; - T x1(in+i); - out[i*N+0] = x1.minCoeff(); - out[i*N+1] = x1.maxCoeff(); - out[i*N+2] = x1.sum(); - out[i*N+3] = x1.prod(); - out[i*N+4] = x1.matrix().squaredNorm(); - out[i*N+5] = x1.matrix().norm(); - out[i*N+6] = x1.colwise().sum().maxCoeff(); - out[i*N+7] = x1.rowwise().maxCoeff().sum(); - out[i*N+8] = x1.matrix().colwise().squaredNorm().sum(); - } -}; - -template<typename T1, typename T2> -struct prod_test { - EIGEN_DEVICE_FUNC - void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const - { - using namespace Eigen; - typedef Matrix<typename T1::Scalar, T1::RowsAtCompileTime, T2::ColsAtCompileTime> T3; - T1 x1(in+i); - T2 x2(in+i+1); - Map<T3> res(out+i*T3::MaxSizeAtCompileTime); - res += in[i] * x1 * x2; - } -}; - -template<typename T1, typename T2> -struct diagonal { - EIGEN_DEVICE_FUNC - void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const - { - using namespace Eigen; - T1 x1(in+i); - Map<T2> res(out+i*T2::MaxSizeAtCompileTime); - res += x1.diagonal(); - } -}; - -template<typename T> -struct eigenvalues { - EIGEN_DEVICE_FUNC - void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const - { - using namespace Eigen; - typedef Matrix<typename T::Scalar, 
T::RowsAtCompileTime, 1> Vec; - T M(in+i); - Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime); - T A = M*M.adjoint(); - SelfAdjointEigenSolver<T> eig; - eig.computeDirect(M); - res = eig.eigenvalues(); - } -}; - -void test_cuda_basic() -{ - ei_test_init_cuda(); - - int nthreads = 100; - Eigen::VectorXf in, out; - - #ifndef __CUDA_ARCH__ - int data_size = nthreads * 512; - in.setRandom(data_size); - out.setRandom(data_size); - #endif - - CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Vector3f>(), nthreads, in, out) ); - CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Array44f>(), nthreads, in, out) ); - - CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array4f>(), nthreads, in, out) ); - CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array33f>(), nthreads, in, out) ); - - CALL_SUBTEST( run_and_compare_to_cuda(redux<Array4f>(), nthreads, in, out) ); - CALL_SUBTEST( run_and_compare_to_cuda(redux<Matrix3f>(), nthreads, in, out) ); - - CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) ); - CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) ); - - CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) ); - CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) ); - - CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix3f>(), nthreads, in, out) ); - CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix2f>(), nthreads, in, out) ); - -} diff --git a/test/cuda_common.h b/test/cuda_common.h deleted file mode 100644 index 9737693ac..000000000 --- a/test/cuda_common.h +++ /dev/null @@ -1,101 +0,0 @@ - -#ifndef EIGEN_TEST_CUDA_COMMON_H -#define EIGEN_TEST_CUDA_COMMON_H - -#include <cuda.h> -#include <cuda_runtime.h> -#include <cuda_runtime_api.h> -#include <iostream> - -#ifndef __CUDACC__ -dim3 threadIdx, blockDim, blockIdx; -#endif - -template<typename Kernel, typename Input, typename Output> -void run_on_cpu(const Kernel& ker, 
int n, const Input& in, Output& out) -{ - for(int i=0; i<n; i++) - ker(i, in.data(), out.data()); -} - - -template<typename Kernel, typename Input, typename Output> -__global__ -void run_on_cuda_meta_kernel(const Kernel ker, int n, const Input* in, Output* out) -{ - int i = threadIdx.x + blockIdx.x*blockDim.x; - if(i<n) { - ker(i, in, out); - } -} - - -template<typename Kernel, typename Input, typename Output> -void run_on_cuda(const Kernel& ker, int n, const Input& in, Output& out) -{ - typename Input::Scalar* d_in; - typename Output::Scalar* d_out; - std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar); - std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar); - - cudaMalloc((void**)(&d_in), in_bytes); - cudaMalloc((void**)(&d_out), out_bytes); - - cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice); - cudaMemcpy(d_out, out.data(), out_bytes, cudaMemcpyHostToDevice); - - // Simple and non-optimal 1D mapping assuming n is not too large - // That's only for unit testing! 
- dim3 Blocks(128); - dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) ); - - cudaThreadSynchronize(); - run_on_cuda_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out); - cudaThreadSynchronize(); - - // check inputs have not been modified - cudaMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, cudaMemcpyDeviceToHost); - cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost); - - cudaFree(d_in); - cudaFree(d_out); -} - - -template<typename Kernel, typename Input, typename Output> -void run_and_compare_to_cuda(const Kernel& ker, int n, const Input& in, Output& out) -{ - Input in_ref, in_cuda; - Output out_ref, out_cuda; - #ifndef __CUDA_ARCH__ - in_ref = in_cuda = in; - out_ref = out_cuda = out; - #endif - run_on_cpu (ker, n, in_ref, out_ref); - run_on_cuda(ker, n, in_cuda, out_cuda); - #ifndef __CUDA_ARCH__ - VERIFY_IS_APPROX(in_ref, in_cuda); - VERIFY_IS_APPROX(out_ref, out_cuda); - #endif -} - - -void ei_test_init_cuda() -{ - int device = 0; - cudaDeviceProp deviceProp; - cudaGetDeviceProperties(&deviceProp, device); - std::cout << "CUDA device info:\n"; - std::cout << " name: " << deviceProp.name << "\n"; - std::cout << " capability: " << deviceProp.major << "." 
<< deviceProp.minor << "\n"; - std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n"; - std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n"; - std::cout << " warpSize: " << deviceProp.warpSize << "\n"; - std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n"; - std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n"; - std::cout << " clockRate: " << deviceProp.clockRate << "\n"; - std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n"; - std::cout << " computeMode: " << deviceProp.computeMode << "\n"; -} - -#endif // EIGEN_TEST_CUDA_COMMON_H diff --git a/test/denseLM.cpp b/test/denseLM.cpp index 0aa736ea3..afb8004b1 100644 --- a/test/denseLM.cpp +++ b/test/denseLM.cpp @@ -182,7 +182,7 @@ void test_denseLM_T() } -void test_denseLM() +EIGEN_DECLARE_TEST(denseLM) { CALL_SUBTEST_2(test_denseLM_T<double>()); diff --git a/test/dense_storage.cpp b/test/dense_storage.cpp index e63712b1a..45c2bd728 100644 --- a/test/dense_storage.cpp +++ b/test/dense_storage.cpp @@ -8,17 +8,27 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" +#include "AnnoyingScalar.h" +#include "SafeScalar.h" #include <Eigen/Core> -template <typename T, int Rows, int Cols> -void dense_storage_copy() +#if EIGEN_HAS_TYPE_TRAITS && EIGEN_HAS_CXX11 +using DenseStorageD3x3 = Eigen::DenseStorage<double, 3, 3, 3, 3>; +static_assert(std::is_trivially_move_constructible<DenseStorageD3x3>::value, "DenseStorage not trivially_move_constructible"); +static_assert(std::is_trivially_move_assignable<DenseStorageD3x3>::value, "DenseStorage not trivially_move_assignable"); +#if !defined(EIGEN_DENSE_STORAGE_CTOR_PLUGIN) +static_assert(std::is_trivially_copy_constructible<DenseStorageD3x3>::value, "DenseStorage not trivially_copy_constructible"); +static_assert(std::is_trivially_copy_assignable<DenseStorageD3x3>::value, "DenseStorage not trivially_copy_assignable"); +static_assert(std::is_trivially_copyable<DenseStorageD3x3>::value, "DenseStorage not trivially_copyable"); +#endif +#endif + +template <typename T, int Size, int Rows, int Cols> +void dense_storage_copy(int rows, int cols) { - static const int Size = ((Rows==Dynamic || Cols==Dynamic) ? Dynamic : Rows*Cols); - typedef DenseStorage<T,Size, Rows,Cols, 0> DenseStorageType; + typedef DenseStorage<T, Size, Rows, Cols, 0> DenseStorageType; - const int rows = (Rows==Dynamic) ? 4 : Rows; - const int cols = (Cols==Dynamic) ? 3 : Cols; const int size = rows*cols; DenseStorageType reference(size, rows, cols); T* raw_reference = reference.data(); @@ -31,14 +41,11 @@ void dense_storage_copy() VERIFY_IS_EQUAL(raw_reference[i], raw_copied_reference[i]); } -template <typename T, int Rows, int Cols> -void dense_storage_assignment() +template <typename T, int Size, int Rows, int Cols> +void dense_storage_assignment(int rows, int cols) { - static const int Size = ((Rows==Dynamic || Cols==Dynamic) ? 
Dynamic : Rows*Cols); - typedef DenseStorage<T,Size, Rows,Cols, 0> DenseStorageType; + typedef DenseStorage<T, Size, Rows, Cols, 0> DenseStorageType; - const int rows = (Rows==Dynamic) ? 4 : Rows; - const int cols = (Cols==Dynamic) ? 3 : Cols; const int size = rows*cols; DenseStorageType reference(size, rows, cols); T* raw_reference = reference.data(); @@ -52,25 +59,132 @@ void dense_storage_assignment() VERIFY_IS_EQUAL(raw_reference[i], raw_copied_reference[i]); } -void test_dense_storage() +template <typename T, int Size, int Rows, int Cols> +void dense_storage_swap(int rows0, int cols0, int rows1, int cols1) { - dense_storage_copy<int,Dynamic,Dynamic>(); - dense_storage_copy<int,Dynamic,3>(); - dense_storage_copy<int,4,Dynamic>(); - dense_storage_copy<int,4,3>(); + typedef DenseStorage<T, Size, Rows, Cols, 0> DenseStorageType; + + const int size0 = rows0*cols0; + DenseStorageType a(size0, rows0, cols0); + for (int i=0; i<size0; ++i) { + a.data()[i] = static_cast<T>(i); + } + + const int size1 = rows1*cols1; + DenseStorageType b(size1, rows1, cols1); + for (int i=0; i<size1; ++i) { + b.data()[i] = static_cast<T>(-i); + } + + a.swap(b); + + for (int i=0; i<size0; ++i) { + VERIFY_IS_EQUAL(b.data()[i], static_cast<T>(i)); + } + + for (int i=0; i<size1; ++i) { + VERIFY_IS_EQUAL(a.data()[i], static_cast<T>(-i)); + } +} - dense_storage_copy<float,Dynamic,Dynamic>(); - dense_storage_copy<float,Dynamic,3>(); - dense_storage_copy<float,4,Dynamic>(); - dense_storage_copy<float,4,3>(); +template<typename T, int Size, std::size_t Alignment> +void dense_storage_alignment() +{ + #if EIGEN_HAS_ALIGNAS - dense_storage_assignment<int,Dynamic,Dynamic>(); - dense_storage_assignment<int,Dynamic,3>(); - dense_storage_assignment<int,4,Dynamic>(); - dense_storage_assignment<int,4,3>(); + struct alignas(Alignment) Empty1 {}; + VERIFY_IS_EQUAL(std::alignment_of<Empty1>::value, Alignment); + + struct EIGEN_ALIGN_TO_BOUNDARY(Alignment) Empty2 {}; + 
VERIFY_IS_EQUAL(std::alignment_of<Empty2>::value, Alignment); + + struct Nested1 { EIGEN_ALIGN_TO_BOUNDARY(Alignment) T data[Size]; }; + VERIFY_IS_EQUAL(std::alignment_of<Nested1>::value, Alignment); + + VERIFY_IS_EQUAL( (std::alignment_of<internal::plain_array<T,Size,AutoAlign,Alignment> >::value), Alignment); - dense_storage_assignment<float,Dynamic,Dynamic>(); - dense_storage_assignment<float,Dynamic,3>(); - dense_storage_assignment<float,4,Dynamic>(); - dense_storage_assignment<float,4,3>(); + const std::size_t default_alignment = internal::compute_default_alignment<T,Size>::value; + + VERIFY_IS_EQUAL( (std::alignment_of<DenseStorage<T,Size,1,1,AutoAlign> >::value), default_alignment); + VERIFY_IS_EQUAL( (std::alignment_of<Matrix<T,Size,1,AutoAlign> >::value), default_alignment); + struct Nested2 { Matrix<T,Size,1,AutoAlign> mat; }; + VERIFY_IS_EQUAL(std::alignment_of<Nested2>::value, default_alignment); + + #endif +} + +template<typename T> +void dense_storage_tests() { + // Dynamic Storage. + dense_storage_copy<T,Dynamic,Dynamic,Dynamic>(4, 3); + dense_storage_copy<T,Dynamic,Dynamic,3>(4, 3); + dense_storage_copy<T,Dynamic,4,Dynamic>(4, 3); + // Fixed Storage. + dense_storage_copy<T,12,4,3>(4, 3); + dense_storage_copy<T,12,Dynamic,Dynamic>(4, 3); + dense_storage_copy<T,12,4,Dynamic>(4, 3); + dense_storage_copy<T,12,Dynamic,3>(4, 3); + // Fixed Storage with Uninitialized Elements. + dense_storage_copy<T,18,Dynamic,Dynamic>(4, 3); + dense_storage_copy<T,18,4,Dynamic>(4, 3); + dense_storage_copy<T,18,Dynamic,3>(4, 3); + + // Dynamic Storage. + dense_storage_assignment<T,Dynamic,Dynamic,Dynamic>(4, 3); + dense_storage_assignment<T,Dynamic,Dynamic,3>(4, 3); + dense_storage_assignment<T,Dynamic,4,Dynamic>(4, 3); + // Fixed Storage. 
+ dense_storage_assignment<T,12,4,3>(4, 3); + dense_storage_assignment<T,12,Dynamic,Dynamic>(4, 3); + dense_storage_assignment<T,12,4,Dynamic>(4, 3); + dense_storage_assignment<T,12,Dynamic,3>(4, 3); + // Fixed Storage with Uninitialized Elements. + dense_storage_assignment<T,18,Dynamic,Dynamic>(4, 3); + dense_storage_assignment<T,18,4,Dynamic>(4, 3); + dense_storage_assignment<T,18,Dynamic,3>(4, 3); + + // Dynamic Storage. + dense_storage_swap<T,Dynamic,Dynamic,Dynamic>(4, 3, 4, 3); + dense_storage_swap<T,Dynamic,Dynamic,Dynamic>(4, 3, 2, 1); + dense_storage_swap<T,Dynamic,Dynamic,Dynamic>(2, 1, 4, 3); + dense_storage_swap<T,Dynamic,Dynamic,3>(4, 3, 4, 3); + dense_storage_swap<T,Dynamic,Dynamic,3>(4, 3, 2, 3); + dense_storage_swap<T,Dynamic,Dynamic,3>(2, 3, 4, 3); + dense_storage_swap<T,Dynamic,4,Dynamic>(4, 3, 4, 3); + dense_storage_swap<T,Dynamic,4,Dynamic>(4, 3, 4, 1); + dense_storage_swap<T,Dynamic,4,Dynamic>(4, 1, 4, 3); + // Fixed Storage. + dense_storage_swap<T,12,4,3>(4, 3, 4, 3); + dense_storage_swap<T,12,Dynamic,Dynamic>(4, 3, 4, 3); + dense_storage_swap<T,12,Dynamic,Dynamic>(4, 3, 2, 1); + dense_storage_swap<T,12,Dynamic,Dynamic>(2, 1, 4, 3); + dense_storage_swap<T,12,4,Dynamic>(4, 3, 4, 3); + dense_storage_swap<T,12,4,Dynamic>(4, 3, 4, 1); + dense_storage_swap<T,12,4,Dynamic>(4, 1, 4, 3); + dense_storage_swap<T,12,Dynamic,3>(4, 3, 4, 3); + dense_storage_swap<T,12,Dynamic,3>(4, 3, 2, 3); + dense_storage_swap<T,12,Dynamic,3>(2, 3, 4, 3); + // Fixed Storage with Uninitialized Elements. 
+ dense_storage_swap<T,18,Dynamic,Dynamic>(4, 3, 4, 3); + dense_storage_swap<T,18,Dynamic,Dynamic>(4, 3, 2, 1); + dense_storage_swap<T,18,Dynamic,Dynamic>(2, 1, 4, 3); + dense_storage_swap<T,18,4,Dynamic>(4, 3, 4, 3); + dense_storage_swap<T,18,4,Dynamic>(4, 3, 4, 1); + dense_storage_swap<T,18,4,Dynamic>(4, 1, 4, 3); + dense_storage_swap<T,18,Dynamic,3>(4, 3, 4, 3); + dense_storage_swap<T,18,Dynamic,3>(4, 3, 2, 3); + dense_storage_swap<T,18,Dynamic,3>(2, 3, 4, 3); + + dense_storage_alignment<T,16,8>(); + dense_storage_alignment<T,16,16>(); + dense_storage_alignment<T,16,32>(); + dense_storage_alignment<T,16,64>(); +} + +EIGEN_DECLARE_TEST(dense_storage) +{ + dense_storage_tests<int>(); + dense_storage_tests<float>(); + dense_storage_tests<SafeScalar<float> >(); + dense_storage_tests<AnnoyingScalar>(); } diff --git a/test/determinant.cpp b/test/determinant.cpp index 758f3afbb..7dd33c373 100644 --- a/test/determinant.cpp +++ b/test/determinant.cpp @@ -16,7 +16,6 @@ template<typename MatrixType> void determinant(const MatrixType& m) /* this test covers the following files: Determinant.h */ - typedef typename MatrixType::Index Index; Index size = m.rows(); MatrixType m1(size, size), m2(size, size); @@ -51,7 +50,7 @@ template<typename MatrixType> void determinant(const MatrixType& m) VERIFY_IS_APPROX(m2.block(0,0,0,0).determinant(), Scalar(1)); } -void test_determinant() +EIGEN_DECLARE_TEST(determinant) { for(int i = 0; i < g_repeat; i++) { int s = 0; diff --git a/test/diagonal.cpp b/test/diagonal.cpp index c1546e97d..4e8c4b3c9 100644 --- a/test/diagonal.cpp +++ b/test/diagonal.cpp @@ -11,7 +11,6 @@ template<typename MatrixType> void diagonal(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); @@ -66,6 +65,9 @@ template<typename MatrixType> void diagonal(const MatrixType& m) m2.diagonal(N2).coeffRef(0) = Scalar(2)*s1; VERIFY_IS_APPROX(m2.diagonal(N2).coeff(0), Scalar(2)*s1); } + + VERIFY( 
m1.diagonal( cols).size()==0 ); + VERIFY( m1.diagonal(-rows).size()==0 ); } template<typename MatrixType> void diagonal_assert(const MatrixType& m) { @@ -81,9 +83,12 @@ template<typename MatrixType> void diagonal_assert(const MatrixType& m) { VERIFY_RAISES_ASSERT( m1.array() *= m1.diagonal().array() ); VERIFY_RAISES_ASSERT( m1.array() /= m1.diagonal().array() ); } + + VERIFY_RAISES_ASSERT( m1.diagonal(cols+1) ); + VERIFY_RAISES_ASSERT( m1.diagonal(-(rows+1)) ); } -void test_diagonal() +EIGEN_DECLARE_TEST(diagonal) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( diagonal(Matrix<float, 1, 1>()) ); @@ -95,7 +100,6 @@ void test_diagonal() CALL_SUBTEST_2( diagonal(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_1( diagonal(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_1( diagonal(Matrix<float,Dynamic,4>(3, 4)) ); + CALL_SUBTEST_1( diagonal_assert(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); } - - CALL_SUBTEST_1( diagonal_assert(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); } diff --git a/test/diagonal_matrix_variadic_ctor.cpp b/test/diagonal_matrix_variadic_ctor.cpp new file mode 100644 index 000000000..fbc8f8470 --- /dev/null +++ b/test/diagonal_matrix_variadic_ctor.cpp @@ -0,0 +1,185 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 David Tellenbach <david.tellenbach@tellnotes.org> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define EIGEN_NO_STATIC_ASSERT + +#include "main.h" + +template <typename Scalar> +void assertionTest() +{ + typedef DiagonalMatrix<Scalar, 5> DiagMatrix5; + typedef DiagonalMatrix<Scalar, 7> DiagMatrix7; + typedef DiagonalMatrix<Scalar, Dynamic> DiagMatrixX; + + Scalar raw[6]; + for (int i = 0; i < 6; ++i) { + raw[i] = internal::random<Scalar>(); + } + + VERIFY_RAISES_ASSERT((DiagMatrix5{raw[0], raw[1], raw[2], raw[3]})); + VERIFY_RAISES_ASSERT((DiagMatrix5{raw[0], raw[1], raw[3]})); + VERIFY_RAISES_ASSERT((DiagMatrix7{raw[0], raw[1], raw[2], raw[3]})); + + VERIFY_RAISES_ASSERT((DiagMatrixX { + {raw[0], raw[1], raw[2]}, + {raw[3], raw[4], raw[5]} + })); +} + +#define VERIFY_IMPLICIT_CONVERSION_3(DIAGTYPE, V0, V1, V2) \ + DIAGTYPE d(V0, V1, V2); \ + DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ + VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ + VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ + VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); + +#define VERIFY_IMPLICIT_CONVERSION_4(DIAGTYPE, V0, V1, V2, V3) \ + DIAGTYPE d(V0, V1, V2, V3); \ + DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ + VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ + VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ + VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \ + VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); + +#define VERIFY_IMPLICIT_CONVERSION_5(DIAGTYPE, V0, V1, V2, V3, V4) \ + DIAGTYPE d(V0, V1, V2, V3, V4); \ + DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ + VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ + VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ + VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \ + VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); \ + VERIFY_IS_APPROX(Dense(4, 4), (Scalar)V4); + +template<typename Scalar> +void constructorTest() +{ + typedef DiagonalMatrix<Scalar, 0> DiagonalMatrix0; + typedef DiagonalMatrix<Scalar, 3> DiagonalMatrix3; + typedef DiagonalMatrix<Scalar, 4> DiagonalMatrix4; + typedef DiagonalMatrix<Scalar, Dynamic> DiagonalMatrixX; + + Scalar raw[7]; + for 
(int k = 0; k < 7; ++k) raw[k] = internal::random<Scalar>(); + + // Fixed-sized matrices + { + DiagonalMatrix0 a {{}}; + VERIFY(a.rows() == 0); + VERIFY(a.cols() == 0); + typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}}; + VERIFY(a.rows() == 3); + VERIFY(a.cols() == 3); + typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}}; + VERIFY(a.rows() == 4); + VERIFY(a.cols() == 4); + typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + + // dynamically sized matrices + { + DiagonalMatrixX a{{}}; + VERIFY(a.rows() == 0); + VERIFY(a.rows() == 0); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}}; + VERIFY(a.rows() == 7); + VERIFY(a.rows() == 7); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } +} + +template<> +void constructorTest<float>() +{ + typedef float Scalar; + + typedef DiagonalMatrix<Scalar, 0> DiagonalMatrix0; + typedef DiagonalMatrix<Scalar, 3> DiagonalMatrix3; + typedef DiagonalMatrix<Scalar, 4> DiagonalMatrix4; + typedef DiagonalMatrix<Scalar, 5> DiagonalMatrix5; + typedef DiagonalMatrix<Scalar, Dynamic> DiagonalMatrixX; + + Scalar raw[7]; + for (int k = 0; k < 7; ++k) raw[k] = internal::random<Scalar>(); + + // Fixed-sized matrices + { + DiagonalMatrix0 a {{}}; + VERIFY(a.rows() == 0); + VERIFY(a.cols() == 0); + typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + 
DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}}; + VERIFY(a.rows() == 3); + VERIFY(a.cols() == 3); + typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}}; + VERIFY(a.rows() == 4); + VERIFY(a.cols() == 4); + typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + + // dynamically sized matrices + { + DiagonalMatrixX a{{}}; + VERIFY(a.rows() == 0); + VERIFY(a.rows() == 0); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}}; + VERIFY(a.rows() == 7); + VERIFY(a.rows() == 7); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { VERIFY_IMPLICIT_CONVERSION_3(DiagonalMatrix3, 1.2647, 2.56f, -3); } + { VERIFY_IMPLICIT_CONVERSION_4(DiagonalMatrix4, 1.2647, 2.56f, -3, 3.23f); } + { VERIFY_IMPLICIT_CONVERSION_5(DiagonalMatrix5, 1.2647, 2.56f, -3, 3.23f, 2); } +} + +EIGEN_DECLARE_TEST(diagonal_matrix_variadic_ctor) +{ + CALL_SUBTEST_1(assertionTest<unsigned char>()); + CALL_SUBTEST_1(assertionTest<float>()); + CALL_SUBTEST_1(assertionTest<Index>()); + CALL_SUBTEST_1(assertionTest<int>()); + CALL_SUBTEST_1(assertionTest<long int>()); + CALL_SUBTEST_1(assertionTest<std::ptrdiff_t>()); + CALL_SUBTEST_1(assertionTest<std::complex<double>>()); + + CALL_SUBTEST_2(constructorTest<unsigned char>()); + CALL_SUBTEST_2(constructorTest<float>()); + CALL_SUBTEST_2(constructorTest<Index>()); + CALL_SUBTEST_2(constructorTest<int>()); + CALL_SUBTEST_2(constructorTest<long int>()); + CALL_SUBTEST_2(constructorTest<std::ptrdiff_t>()); + CALL_SUBTEST_2(constructorTest<std::complex<double>>()); +} diff --git 
a/test/diagonalmatrices.cpp b/test/diagonalmatrices.cpp index cd6dc8cf0..276beade0 100644 --- a/test/diagonalmatrices.cpp +++ b/test/diagonalmatrices.cpp @@ -11,7 +11,6 @@ using namespace std; template<typename MatrixType> void diagonalmatrices(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime }; typedef Matrix<Scalar, Rows, 1> VectorType; @@ -30,6 +29,7 @@ template<typename MatrixType> void diagonalmatrices(const MatrixType& m) v2 = VectorType::Random(rows); RowVectorType rv1 = RowVectorType::Random(cols), rv2 = RowVectorType::Random(cols); + LeftDiagonalMatrix ldm1(v1), ldm2(v2); RightDiagonalMatrix rdm1(rv1), rdm2(rv2); @@ -99,6 +99,45 @@ template<typename MatrixType> void diagonalmatrices(const MatrixType& m) VERIFY_IS_APPROX( (sq_m1 += (s1*v1).asDiagonal()), sq_m2 += (s1*v1).asDiagonal().toDenseMatrix() ); VERIFY_IS_APPROX( (sq_m1 -= (s1*v1).asDiagonal()), sq_m2 -= (s1*v1).asDiagonal().toDenseMatrix() ); VERIFY_IS_APPROX( (sq_m1 = (s1*v1).asDiagonal()), (s1*v1).asDiagonal().toDenseMatrix() ); + + sq_m1.setRandom(); + sq_m2 = v1.asDiagonal(); + sq_m2 = sq_m1 * sq_m2; + VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).col(i), sq_m2.col(i) ); + VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).row(i), sq_m2.row(i) ); + + sq_m1 = v1.asDiagonal(); + sq_m2 = v2.asDiagonal(); + SquareMatrixType sq_m3 = v1.asDiagonal(); + VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() + v2.asDiagonal(), sq_m1 + sq_m2); + VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() - v2.asDiagonal(), sq_m1 - sq_m2); + VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() - 2*v2.asDiagonal() + v1.asDiagonal(), sq_m1 - 2*sq_m2 + sq_m1); +} + +template<typename MatrixType> void as_scalar_product(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; + typedef Matrix<Scalar, Dynamic, Dynamic> DynMatrixType; + 
typedef Matrix<Scalar, Dynamic, 1> DynVectorType; + typedef Matrix<Scalar, 1, Dynamic> DynRowVectorType; + + Index rows = m.rows(); + Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + + VectorType v1 = VectorType::Random(rows); + DynVectorType dv1 = DynVectorType::Random(depth); + DynRowVectorType drv1 = DynRowVectorType::Random(depth); + DynMatrixType dm1 = dv1; + DynMatrixType drm1 = drv1; + + Scalar s = v1(0); + + VERIFY_IS_APPROX( v1.asDiagonal() * drv1, s*drv1 ); + VERIFY_IS_APPROX( dv1 * v1.asDiagonal(), dv1*s ); + + VERIFY_IS_APPROX( v1.asDiagonal() * drm1, s*drm1 ); + VERIFY_IS_APPROX( dm1 * v1.asDiagonal(), dm1*s ); } template<int> @@ -112,18 +151,23 @@ void bug987() VERIFY_IS_APPROX(( res1 = points.topLeftCorner<2,2>()*diag.asDiagonal()) , res2 = tmp2*diag.asDiagonal() ); } -void test_diagonalmatrices() +EIGEN_DECLARE_TEST(diagonalmatrices) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( diagonalmatrices(Matrix<float, 1, 1>()) ); + CALL_SUBTEST_1( as_scalar_product(Matrix<float, 1, 1>()) ); + CALL_SUBTEST_2( diagonalmatrices(Matrix3f()) ); CALL_SUBTEST_3( diagonalmatrices(Matrix<double,3,3,RowMajor>()) ); CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) ); CALL_SUBTEST_5( diagonalmatrices(Matrix<float,4,4,RowMajor>()) ); CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( as_scalar_product(MatrixXcf(1,1)) ); CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( diagonalmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_9( diagonalmatrices(MatrixXf(1,1)) ); + CALL_SUBTEST_9( 
as_scalar_product(MatrixXf(1,1)) ); } CALL_SUBTEST_10( bug987<0>() ); } diff --git a/test/dontalign.cpp b/test/dontalign.cpp index 4643cfed6..2e4102b86 100644 --- a/test/dontalign.cpp +++ b/test/dontalign.cpp @@ -19,7 +19,6 @@ template<typename MatrixType> void dontalign(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType; @@ -45,7 +44,7 @@ void dontalign(const MatrixType& m) internal::aligned_delete(array, rows); } -void test_dontalign() +EIGEN_DECLARE_TEST(dontalign) { #if defined EIGEN_TEST_PART_1 || defined EIGEN_TEST_PART_5 dontalign(Matrix3d()); diff --git a/test/dynalloc.cpp b/test/dynalloc.cpp index f1cc70bee..23c90a7b5 100644 --- a/test/dynalloc.cpp +++ b/test/dynalloc.cpp @@ -15,6 +15,7 @@ #define ALIGNMENT 1 #endif +typedef Matrix<float,16,1> Vector16f; typedef Matrix<float,8,1> Vector8f; void check_handmade_aligned_malloc() @@ -70,7 +71,7 @@ struct MyStruct { EIGEN_MAKE_ALIGNED_OPERATOR_NEW char dummychar; - Vector8f avec; + Vector16f avec; }; class MyClassA @@ -78,7 +79,7 @@ class MyClassA public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW char dummychar; - Vector8f avec; + Vector16f avec; }; template<typename T> void check_dynaligned() @@ -106,7 +107,7 @@ template<typename T> void check_custom_new_delete() delete[] t; } -#if EIGEN_MAX_ALIGN_BYTES>0 +#if EIGEN_MAX_ALIGN_BYTES>0 && (!EIGEN_HAS_CXX17_OVERALIGN) { T* t = static_cast<T *>((T::operator new)(sizeof(T))); (T::operator delete)(t, sizeof(T)); @@ -119,7 +120,7 @@ template<typename T> void check_custom_new_delete() #endif } -void test_dynalloc() +EIGEN_DECLARE_TEST(dynalloc) { // low level dynamic memory allocation CALL_SUBTEST(check_handmade_aligned_malloc()); @@ -145,6 +146,7 @@ void test_dynalloc() CALL_SUBTEST(check_dynaligned<Vector4d>() ); 
CALL_SUBTEST(check_dynaligned<Vector4i>() ); CALL_SUBTEST(check_dynaligned<Vector8f>() ); + CALL_SUBTEST(check_dynaligned<Vector16f>() ); } { diff --git a/test/eigen2support.cpp b/test/eigen2support.cpp index ad1d98091..49d7328e9 100644 --- a/test/eigen2support.cpp +++ b/test/eigen2support.cpp @@ -13,7 +13,6 @@ template<typename MatrixType> void eigen2support(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); @@ -53,7 +52,7 @@ template<typename MatrixType> void eigen2support(const MatrixType& m) m1.minor(0,0); } -void test_eigen2support() +EIGEN_DECLARE_TEST(eigen2support) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eigen2support(Matrix<double,1,1>()) ); diff --git a/test/eigensolver_complex.cpp b/test/eigensolver_complex.cpp index 293b1b265..c5373f420 100644 --- a/test/eigensolver_complex.cpp +++ b/test/eigensolver_complex.cpp @@ -47,7 +47,7 @@ template<typename MatrixType> bool find_pivot(typename MatrixType::Scalar tol, M return false; } -/* Check that two column vectors are approximately equal upto permutations. +/* Check that two column vectors are approximately equal up to permutations. * Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(), * however this strategy is numerically inacurate because of numerical cancellation issues. 
*/ @@ -71,7 +71,6 @@ void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& template<typename MatrixType> void eigensolver(const MatrixType& m) { - typedef typename MatrixType::Index Index; /* this test covers the following files: ComplexEigenSolver.h, and indirectly ComplexSchur.h */ @@ -153,7 +152,7 @@ template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m VERIFY_RAISES_ASSERT(eig.eigenvectors()); } -void test_eigensolver_complex() +EIGEN_DECLARE_TEST(eigensolver_complex) { int s = 0; for(int i = 0; i < g_repeat; i++) { diff --git a/test/eigensolver_generalized_real.cpp b/test/eigensolver_generalized_real.cpp index 9c0838ba4..95ed431db 100644 --- a/test/eigensolver_generalized_real.cpp +++ b/test/eigensolver_generalized_real.cpp @@ -15,7 +15,6 @@ template<typename MatrixType> void generalized_eigensolver_real(const MatrixType& m) { - typedef typename MatrixType::Index Index; /* this test covers the following files: GeneralizedEigenSolver.h */ @@ -77,9 +76,16 @@ template<typename MatrixType> void generalized_eigensolver_real(const MatrixType GeneralizedEigenSolver<MatrixType> eig2(a.adjoint() * a,b.adjoint() * b); eig2.compute(a.adjoint() * a,b.adjoint() * b); } + + // check without eigenvectors + { + GeneralizedEigenSolver<MatrixType> eig1(spdA, spdB, true); + GeneralizedEigenSolver<MatrixType> eig2(spdA, spdB, false); + VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues()); + } } -void test_eigensolver_generalized_real() +EIGEN_DECLARE_TEST(eigensolver_generalized_real) { for(int i = 0; i < g_repeat; i++) { int s = 0; diff --git a/test/eigensolver_generic.cpp b/test/eigensolver_generic.cpp index d0e644d4b..7adb98665 100644 --- a/test/eigensolver_generic.cpp +++ b/test/eigensolver_generic.cpp @@ -12,9 +12,23 @@ #include <limits> #include <Eigen/Eigenvalues> +template<typename EigType,typename MatType> +void check_eigensolver_for_given_mat(const EigType &eig, const MatType& a) +{ + typedef typename 
NumTraits<typename MatType::Scalar>::Real RealScalar; + typedef Matrix<RealScalar, MatType::RowsAtCompileTime, 1> RealVectorType; + typedef typename std::complex<RealScalar> Complex; + Index n = a.rows(); + VERIFY_IS_EQUAL(eig.info(), Success); + VERIFY_IS_APPROX(a * eig.pseudoEigenvectors(), eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()); + VERIFY_IS_APPROX(a.template cast<Complex>() * eig.eigenvectors(), + eig.eigenvectors() * eig.eigenvalues().asDiagonal()); + VERIFY_IS_APPROX(eig.eigenvectors().colwise().norm(), RealVectorType::Ones(n).transpose()); + VERIFY_IS_APPROX(a.eigenvalues(), eig.eigenvalues()); +} + template<typename MatrixType> void eigensolver(const MatrixType& m) { - typedef typename MatrixType::Index Index; /* this test covers the following files: EigenSolver.h */ @@ -23,8 +37,7 @@ template<typename MatrixType> void eigensolver(const MatrixType& m) typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; - typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType; - typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex; + typedef typename std::complex<RealScalar> Complex; MatrixType a = MatrixType::Random(rows,cols); MatrixType a1 = MatrixType::Random(rows,cols); @@ -37,12 +50,7 @@ template<typename MatrixType> void eigensolver(const MatrixType& m) (ei0.pseudoEigenvectors().template cast<Complex>()) * (ei0.eigenvalues().asDiagonal())); EigenSolver<MatrixType> ei1(a); - VERIFY_IS_EQUAL(ei1.info(), Success); - VERIFY_IS_APPROX(a * ei1.pseudoEigenvectors(), ei1.pseudoEigenvectors() * ei1.pseudoEigenvalueMatrix()); - VERIFY_IS_APPROX(a.template cast<Complex>() * ei1.eigenvectors(), - ei1.eigenvectors() * ei1.eigenvalues().asDiagonal()); - VERIFY_IS_APPROX(ei1.eigenvectors().colwise().norm(), RealVectorType::Ones(rows).transpose()); - VERIFY_IS_APPROX(a.eigenvalues(), ei1.eigenvalues()); + CALL_SUBTEST( check_eigensolver_for_given_mat(ei1,a) ); 
EigenSolver<MatrixType> ei2; ei2.setMaxIterations(RealSchur<MatrixType>::m_maxIterationsPerRow * rows).compute(a); @@ -68,7 +76,7 @@ template<typename MatrixType> void eigensolver(const MatrixType& m) // Test matrix with NaN a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN(); EigenSolver<MatrixType> eiNaN(a); - VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence); + VERIFY_IS_NOT_EQUAL(eiNaN.info(), Success); } // regression test for bug 1098 @@ -101,7 +109,104 @@ template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors()); } -void test_eigensolver_generic() + +template<typename CoeffType> +Matrix<typename CoeffType::Scalar,Dynamic,Dynamic> +make_companion(const CoeffType& coeffs) +{ + Index n = coeffs.size()-1; + Matrix<typename CoeffType::Scalar,Dynamic,Dynamic> res(n,n); + res.setZero(); + res.row(0) = -coeffs.tail(n) / coeffs(0); + res.diagonal(-1).setOnes(); + return res; +} + +template<int> +void eigensolver_generic_extra() +{ + { + // regression test for bug 793 + MatrixXd a(3,3); + a << 0, 0, 1, + 1, 1, 1, + 1, 1e+200, 1; + Eigen::EigenSolver<MatrixXd> eig(a); + double scale = 1e-200; // scale to avoid overflow during the comparisons + VERIFY_IS_APPROX(a * eig.pseudoEigenvectors()*scale, eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()*scale); + VERIFY_IS_APPROX(a * eig.eigenvectors()*scale, eig.eigenvectors() * eig.eigenvalues().asDiagonal()*scale); + } + { + // check a case where all eigenvalues are null. 
+ MatrixXd a(2,2); + a << 1, 1, + -1, -1; + Eigen::EigenSolver<MatrixXd> eig(a); + VERIFY_IS_APPROX(eig.pseudoEigenvectors().squaredNorm(), 2.); + VERIFY_IS_APPROX((a * eig.pseudoEigenvectors()).norm()+1., 1.); + VERIFY_IS_APPROX((eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()).norm()+1., 1.); + VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.); + VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.); + } + + // regression test for bug 933 + { + { + VectorXd coeffs(5); coeffs << 1, -3, -175, -225, 2250; + MatrixXd C = make_companion(coeffs); + EigenSolver<MatrixXd> eig(C); + CALL_SUBTEST( check_eigensolver_for_given_mat(eig,C) ); + } + { + // this test is tricky because it requires high accuracy in smallest eigenvalues + VectorXd coeffs(5); coeffs << 6.154671e-15, -1.003870e-10, -9.819570e-01, 3.995715e+03, 2.211511e+08; + MatrixXd C = make_companion(coeffs); + EigenSolver<MatrixXd> eig(C); + CALL_SUBTEST( check_eigensolver_for_given_mat(eig,C) ); + Index n = C.rows(); + for(Index i=0;i<n;++i) + { + typedef std::complex<double> Complex; + MatrixXcd ac = C.cast<Complex>(); + ac.diagonal().array() -= eig.eigenvalues()(i); + VectorXd sv = ac.jacobiSvd().singularValues(); + // comparing to sv(0) is not enough here to catch the "bug", + // the hard-coded 1.0 is important! + VERIFY_IS_MUCH_SMALLER_THAN(sv(n-1), 1.0); + } + } + } + // regression test for bug 1557 + { + // this test is interesting because it contains zeros on the diagonal. 
+ MatrixXd A_bug1557(3,3); + A_bug1557 << 0, 0, 0, 1, 0, 0.5887907064808635127, 0, 1, 0; + EigenSolver<MatrixXd> eig(A_bug1557); + CALL_SUBTEST( check_eigensolver_for_given_mat(eig,A_bug1557) ); + } + + // regression test for bug 1174 + { + Index n = 12; + MatrixXf A_bug1174(n,n); + A_bug1174 << 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, + 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, + 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, + 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, + 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0; + EigenSolver<MatrixXf> eig(A_bug1174); + CALL_SUBTEST( check_eigensolver_for_given_mat(eig,A_bug1174) ); + } +} + +EIGEN_DECLARE_TEST(eigensolver_generic) { int s = 0; for(int i = 0; i < g_repeat; i++) { @@ -136,31 +241,7 @@ void test_eigensolver_generic() } ); -#ifdef EIGEN_TEST_PART_2 - { - // regression test for bug 793 - MatrixXd a(3,3); - a << 0, 0, 1, - 1, 1, 1, - 1, 1e+200, 1; - Eigen::EigenSolver<MatrixXd> eig(a); - double scale = 1e-200; // scale to avoid overflow during the comparisons - VERIFY_IS_APPROX(a * eig.pseudoEigenvectors()*scale, eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()*scale); - VERIFY_IS_APPROX(a * eig.eigenvectors()*scale, eig.eigenvectors() * eig.eigenvalues().asDiagonal()*scale); - } - { - // check a case where all eigenvalues are null. 
- MatrixXd a(2,2); - a << 1, 1, - -1, -1; - Eigen::EigenSolver<MatrixXd> eig(a); - VERIFY_IS_APPROX(eig.pseudoEigenvectors().squaredNorm(), 2.); - VERIFY_IS_APPROX((a * eig.pseudoEigenvectors()).norm()+1., 1.); - VERIFY_IS_APPROX((eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()).norm()+1., 1.); - VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.); - VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.); - } -#endif + CALL_SUBTEST_2( eigensolver_generic_extra<0>() ); TEST_SET_BUT_UNUSED_VARIABLE(s) } diff --git a/test/eigensolver_selfadjoint.cpp b/test/eigensolver_selfadjoint.cpp index 39ad4130e..0fb2f4da7 100644 --- a/test/eigensolver_selfadjoint.cpp +++ b/test/eigensolver_selfadjoint.cpp @@ -68,7 +68,6 @@ template<typename MatrixType> void selfadjointeigensolver_essential_check(const template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m) { - typedef typename MatrixType::Index Index; /* this test covers the following files: EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h) */ @@ -231,19 +230,25 @@ void bug_1204() SelfAdjointEigenSolver<Eigen::SparseMatrix<double> > eig(A); } -void test_eigensolver_selfadjoint() +EIGEN_DECLARE_TEST(eigensolver_selfadjoint) { int s = 0; for(int i = 0; i < g_repeat; i++) { + // trivial test for 1x1 matrices: CALL_SUBTEST_1( selfadjointeigensolver(Matrix<float, 1, 1>())); CALL_SUBTEST_1( selfadjointeigensolver(Matrix<double, 1, 1>())); + CALL_SUBTEST_1( selfadjointeigensolver(Matrix<std::complex<double>, 1, 1>())); + // very important to test 3x3 and 2x2 matrices since we provide special paths for them CALL_SUBTEST_12( selfadjointeigensolver(Matrix2f()) ); CALL_SUBTEST_12( selfadjointeigensolver(Matrix2d()) ); + CALL_SUBTEST_12( selfadjointeigensolver(Matrix2cd()) ); CALL_SUBTEST_13( selfadjointeigensolver(Matrix3f()) ); CALL_SUBTEST_13( selfadjointeigensolver(Matrix3d()) ); + CALL_SUBTEST_13( selfadjointeigensolver(Matrix3cd()) ); 
CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) ); + CALL_SUBTEST_2( selfadjointeigensolver(Matrix4cd()) ); s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) ); @@ -255,6 +260,8 @@ void test_eigensolver_selfadjoint() // some trivial but implementation-wise tricky cases CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(1,1)) ); CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(2,2)) ); + CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(1,1)) ); + CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(2,2)) ); CALL_SUBTEST_6( selfadjointeigensolver(Matrix<double,1,1>()) ); CALL_SUBTEST_7( selfadjointeigensolver(Matrix<double,2,2>()) ); } diff --git a/test/evaluators.cpp b/test/evaluators.cpp index aed5a05a7..2810cd265 100644 --- a/test/evaluators.cpp +++ b/test/evaluators.cpp @@ -90,6 +90,12 @@ namespace Eigen { { call_assignment_no_alias(dst.expression(), src, func); } + + template<typename Dst, template <typename> class StorageBase, typename Src, typename Func> + EIGEN_DEVICE_FUNC void call_restricted_packet_assignment(const NoAlias<Dst,StorageBase>& dst, const Src& src, const Func& func) + { + call_restricted_packet_assignment_no_alias(dst.expression(), src, func); + } } } @@ -101,7 +107,7 @@ using namespace std; #define VERIFY_IS_APPROX_EVALUATOR(DEST,EXPR) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (EXPR).eval()); #define VERIFY_IS_APPROX_EVALUATOR2(DEST,EXPR,REF) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (REF).eval()); -void test_evaluators() +EIGEN_DECLARE_TEST(evaluators) { // Testing Matrix evaluator and Transpose Vector2d v = Vector2d::Random(); @@ -496,4 +502,24 @@ void test_evaluators() VERIFY_IS_EQUAL( get_cost(a*(a+b)), 1); VERIFY_IS_EQUAL( get_cost(a.lazyProduct(a+b)), 15); } + + // regression test for PR 544 and bug 1622 (introduced in #71609c4) + { + // test restricted_packet_assignment with an unaligned destination + const size_t M = 2; + const size_t K = 2; + const size_t N = 5; + 
float *destMem = new float[(M*N) + 1]; + float *dest = (internal::UIntPtr(destMem)%EIGEN_MAX_ALIGN_BYTES) == 0 ? destMem+1 : destMem; + + const Matrix<float, Dynamic, Dynamic, RowMajor> a = Matrix<float, Dynamic, Dynamic, RowMajor>::Random(M, K); + const Matrix<float, Dynamic, Dynamic, RowMajor> b = Matrix<float, Dynamic, Dynamic, RowMajor>::Random(K, N); + + Map<Matrix<float, Dynamic, Dynamic, RowMajor> > z(dest, M, N);; + Product<Matrix<float, Dynamic, Dynamic, RowMajor>, Matrix<float, Dynamic, Dynamic, RowMajor>, LazyProduct> tmp(a,b); + internal::call_restricted_packet_assignment(z.noalias(), tmp.derived(), internal::assign_op<float, float>()); + + VERIFY_IS_APPROX(z, a*b); + delete[] destMem; + } } diff --git a/test/exceptions.cpp b/test/exceptions.cpp index b83fb82ba..3d93060ab 100644 --- a/test/exceptions.cpp +++ b/test/exceptions.cpp @@ -8,93 +8,34 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. -// Various sanity tests with exceptions: +// Various sanity tests with exceptions and non trivially copyable scalar type. // - no memory leak when a custom scalar type trow an exceptions // - todo: complete the list of tests! 
#define EIGEN_STACK_ALLOCATION_LIMIT 100000000 #include "main.h" - -struct my_exception -{ - my_exception() {} - ~my_exception() {} -}; - -class ScalarWithExceptions -{ - public: - ScalarWithExceptions() { init(); } - ScalarWithExceptions(const float& _v) { init(); *v = _v; } - ScalarWithExceptions(const ScalarWithExceptions& other) { init(); *v = *(other.v); } - ~ScalarWithExceptions() { - delete v; - instances--; - } - - void init() { - v = new float; - instances++; - } - - ScalarWithExceptions operator+(const ScalarWithExceptions& other) const - { - countdown--; - if(countdown<=0) - throw my_exception(); - return ScalarWithExceptions(*v+*other.v); - } - - ScalarWithExceptions operator-(const ScalarWithExceptions& other) const - { return ScalarWithExceptions(*v-*other.v); } - - ScalarWithExceptions operator*(const ScalarWithExceptions& other) const - { return ScalarWithExceptions((*v)*(*other.v)); } - - ScalarWithExceptions& operator+=(const ScalarWithExceptions& other) - { *v+=*other.v; return *this; } - ScalarWithExceptions& operator-=(const ScalarWithExceptions& other) - { *v-=*other.v; return *this; } - ScalarWithExceptions& operator=(const ScalarWithExceptions& other) - { *v = *(other.v); return *this; } - - bool operator==(const ScalarWithExceptions& other) const - { return *v==*other.v; } - bool operator!=(const ScalarWithExceptions& other) const - { return *v!=*other.v; } - - float* v; - static int instances; - static int countdown; -}; - -ScalarWithExceptions real(const ScalarWithExceptions &x) { return x; } -ScalarWithExceptions imag(const ScalarWithExceptions & ) { return 0; } -ScalarWithExceptions conj(const ScalarWithExceptions &x) { return x; } - -int ScalarWithExceptions::instances = 0; -int ScalarWithExceptions::countdown = 0; - +#include "AnnoyingScalar.h" #define CHECK_MEMLEAK(OP) { \ - ScalarWithExceptions::countdown = 100; \ - int before = ScalarWithExceptions::instances; \ - bool exception_thrown = false; \ - try { OP; } \ + 
AnnoyingScalar::countdown = 100; \ + int before = AnnoyingScalar::instances; \ + bool exception_thrown = false; \ + try { OP; } \ catch (my_exception) { \ exception_thrown = true; \ - VERIFY(ScalarWithExceptions::instances==before && "memory leak detected in " && EIGEN_MAKESTRING(OP)); \ + VERIFY(AnnoyingScalar::instances==before && "memory leak detected in " && EIGEN_MAKESTRING(OP)); \ } \ - VERIFY(exception_thrown && " no exception thrown in " && EIGEN_MAKESTRING(OP)); \ + VERIFY( (AnnoyingScalar::dont_throw) || (exception_thrown && " no exception thrown in " && EIGEN_MAKESTRING(OP)) ); \ } -void memoryleak() +EIGEN_DECLARE_TEST(exceptions) { - typedef Eigen::Matrix<ScalarWithExceptions,Dynamic,1> VectorType; - typedef Eigen::Matrix<ScalarWithExceptions,Dynamic,Dynamic> MatrixType; + typedef Eigen::Matrix<AnnoyingScalar,Dynamic,1> VectorType; + typedef Eigen::Matrix<AnnoyingScalar,Dynamic,Dynamic> MatrixType; { + AnnoyingScalar::dont_throw = false; int n = 50; VectorType v0(n), v1(n); MatrixType m0(n,n), m1(n,n), m2(n,n); @@ -104,10 +45,5 @@ void memoryleak() CHECK_MEMLEAK(m2 = m0 * m1 * m2); CHECK_MEMLEAK((v0+v1).dot(v0+v1)); } - VERIFY(ScalarWithExceptions::instances==0 && "global memory leak detected in " && EIGEN_MAKESTRING(OP)); \ -} - -void test_exceptions() -{ - CALL_SUBTEST( memoryleak() ); + VERIFY(AnnoyingScalar::instances==0 && "global memory leak detected in " && EIGEN_MAKESTRING(OP)); } diff --git a/test/fastmath.cpp b/test/fastmath.cpp index cc5db0746..00a1a59b8 100644 --- a/test/fastmath.cpp +++ b/test/fastmath.cpp @@ -43,11 +43,11 @@ void check_inf_nan(bool dryrun) { } else { - VERIFY( !(numext::isfinite)(m(3)) ); - VERIFY( !(numext::isinf)(m(3)) ); - VERIFY( (numext::isnan)(m(3)) ); - VERIFY( !m.allFinite() ); - VERIFY( m.hasNaN() ); + if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !(numext::isfinite)(m(3)) ); g_test_level=0; + if( (std::isinf) (m(3))) g_test_level=1; VERIFY( !(numext::isinf)(m(3)) ); g_test_level=0; + if(!(std::isnan) 
(m(3))) g_test_level=1; VERIFY( (numext::isnan)(m(3)) ); g_test_level=0; + if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !m.allFinite() ); g_test_level=0; + if(!(std::isnan) (m(3))) g_test_level=1; VERIFY( m.hasNaN() ); g_test_level=0; } T hidden_zero = (std::numeric_limits<T>::min)()*(std::numeric_limits<T>::min)(); m(4) /= hidden_zero; @@ -62,33 +62,33 @@ void check_inf_nan(bool dryrun) { } else { - VERIFY( !(numext::isfinite)(m(4)) ); - VERIFY( (numext::isinf)(m(4)) ); - VERIFY( !(numext::isnan)(m(4)) ); - VERIFY( !m.allFinite() ); - VERIFY( m.hasNaN() ); + if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !(numext::isfinite)(m(4)) ); g_test_level=0; + if(!(std::isinf) (m(3))) g_test_level=1; VERIFY( (numext::isinf)(m(4)) ); g_test_level=0; + if( (std::isnan) (m(3))) g_test_level=1; VERIFY( !(numext::isnan)(m(4)) ); g_test_level=0; + if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !m.allFinite() ); g_test_level=0; + if(!(std::isnan) (m(3))) g_test_level=1; VERIFY( m.hasNaN() ); g_test_level=0; } m(3) = 0; if(dryrun) { std::cout << "std::isfinite(" << m(3) << ") = "; check((std::isfinite)(m(3)),true); std::cout << " ; numext::isfinite = "; check((numext::isfinite)(m(3)), true); std::cout << "\n"; - std::cout << "std::isinf(" << m(3) << ") = "; check((std::isinf)(m(3)),false); std::cout << " ; numext::isinf = "; check((numext::isinf)(m(3)), false); std::cout << "\n"; - std::cout << "std::isnan(" << m(3) << ") = "; check((std::isnan)(m(3)),false); std::cout << " ; numext::isnan = "; check((numext::isnan)(m(3)), false); std::cout << "\n"; + std::cout << "std::isinf(" << m(3) << ") = "; check((std::isinf)(m(3)),false); std::cout << " ; numext::isinf = "; check((numext::isinf)(m(3)), false); std::cout << "\n"; + std::cout << "std::isnan(" << m(3) << ") = "; check((std::isnan)(m(3)),false); std::cout << " ; numext::isnan = "; check((numext::isnan)(m(3)), false); std::cout << "\n"; std::cout << "allFinite: "; check(m.allFinite(), 0); std::cout << "\n"; 
std::cout << "hasNaN: "; check(m.hasNaN(), 0); std::cout << "\n"; std::cout << "\n\n"; } else { - VERIFY( (numext::isfinite)(m(3)) ); - VERIFY( !(numext::isinf)(m(3)) ); - VERIFY( !(numext::isnan)(m(3)) ); - VERIFY( !m.allFinite() ); - VERIFY( !m.hasNaN() ); + if(!(std::isfinite)(m(3))) g_test_level=1; VERIFY( (numext::isfinite)(m(3)) ); g_test_level=0; + if( (std::isinf) (m(3))) g_test_level=1; VERIFY( !(numext::isinf)(m(3)) ); g_test_level=0; + if( (std::isnan) (m(3))) g_test_level=1; VERIFY( !(numext::isnan)(m(3)) ); g_test_level=0; + if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !m.allFinite() ); g_test_level=0; + if( (std::isnan) (m(3))) g_test_level=1; VERIFY( !m.hasNaN() ); g_test_level=0; } } -void test_fastmath() { +EIGEN_DECLARE_TEST(fastmath) { std::cout << "*** float *** \n\n"; check_inf_nan<float>(true); std::cout << "*** double ***\n\n"; check_inf_nan<double>(true); std::cout << "*** long double *** \n\n"; check_inf_nan<long double>(true); diff --git a/test/first_aligned.cpp b/test/first_aligned.cpp index ae2d4bc42..ed9945077 100644 --- a/test/first_aligned.cpp +++ b/test/first_aligned.cpp @@ -26,7 +26,7 @@ void test_none_aligned_helper(Scalar *array, int size) struct some_non_vectorizable_type { float x; }; -void test_first_aligned() +EIGEN_DECLARE_TEST(first_aligned) { EIGEN_ALIGN16 float array_float[100]; test_first_aligned_helper(array_float, 50); diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp index d2339a651..7b1684f29 100644 --- a/test/geo_alignedbox.cpp +++ b/test/geo_alignedbox.cpp @@ -9,27 +9,33 @@ #include "main.h" #include <Eigen/Geometry> -#include <Eigen/LU> -#include <Eigen/QR> -#include<iostream> using namespace std; +// NOTE the following workaround was needed on some 32 bits builds to kill extra precision of x87 registers. +// It seems that it is not needed anymore, but let's keep it here, just in case... 
+ template<typename T> EIGEN_DONT_INLINE -void kill_extra_precision(T& x) { eigen_assert((void*)(&x) != (void*)0); } +void kill_extra_precision(T& /* x */) { + // This one worked but triggered a warning: + /* eigen_assert((void*)(&x) != (void*)0); */ + // An alternative could be: + /* volatile T tmp = x; */ + /* x = tmp; */ +} -template<typename BoxType> void alignedbox(const BoxType& _box) +template<typename BoxType> void alignedbox(const BoxType& box) { /* this test covers the following files: AlignedBox.h */ - typedef typename BoxType::Index Index; typedef typename BoxType::Scalar Scalar; - typedef typename NumTraits<Scalar>::Real RealScalar; + typedef NumTraits<Scalar> ScalarTraits; + typedef typename ScalarTraits::Real RealScalar; typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType; - const Index dim = _box.dim(); + const Index dim = box.dim(); VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); @@ -40,7 +46,7 @@ template<typename BoxType> void alignedbox(const BoxType& _box) BoxType b0(dim); BoxType b1(VectorType::Random(dim),VectorType::Random(dim)); BoxType b2; - + kill_extra_precision(b1); kill_extra_precision(p0); kill_extra_precision(p1); @@ -62,7 +68,7 @@ template<typename BoxType> void alignedbox(const BoxType& _box) BoxType box2(VectorType::Random(dim)); box2.extend(VectorType::Random(dim)); - VERIFY(box1.intersects(box2) == !box1.intersection(box2).isEmpty()); + VERIFY(box1.intersects(box2) == !box1.intersection(box2).isEmpty()); // alignment -- make sure there is no memory alignment assertion BoxType *bp0 = new BoxType(dim); @@ -80,17 +86,353 @@ template<typename BoxType> void alignedbox(const BoxType& _box) } +template<typename BoxType> void alignedboxTranslatable(const BoxType& box) +{ + typedef typename BoxType::Scalar Scalar; + typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType; + typedef Transform<Scalar, BoxType::AmbientDimAtCompileTime, Isometry> IsometryTransform; + typedef 
Transform<Scalar, BoxType::AmbientDimAtCompileTime, Affine> AffineTransform; + + alignedbox(box); + + const VectorType Ones = VectorType::Ones(); + const VectorType UnitX = VectorType::UnitX(); + const Index dim = box.dim(); + + // box((-1, -1, -1), (1, 1, 1)) + BoxType a(-Ones, Ones); + + VERIFY_IS_APPROX(a.sizes(), Ones * Scalar(2)); + + BoxType b = a; + VectorType translate = Ones; + translate[0] = Scalar(2); + b.translate(translate); + // translate by (2, 1, 1) -> box((1, 0, 0), (3, 2, 2)) + + VERIFY_IS_APPROX(b.sizes(), Ones * Scalar(2)); + VERIFY_IS_APPROX((b.min)(), UnitX); + VERIFY_IS_APPROX((b.max)(), Ones * Scalar(2) + UnitX); + + // Test transform + + IsometryTransform tf = IsometryTransform::Identity(); + tf.translation() = -translate; + + BoxType c = b.transformed(tf); + // translate by (-2, -1, -1) -> box((-1, -1, -1), (1, 1, 1)) + VERIFY_IS_APPROX(c.sizes(), a.sizes()); + VERIFY_IS_APPROX((c.min)(), (a.min)()); + VERIFY_IS_APPROX((c.max)(), (a.max)()); + + c.transform(tf); + // translate by (-2, -1, -1) -> box((-3, -2, -2), (-1, 0, 0)) + VERIFY_IS_APPROX(c.sizes(), a.sizes()); + VERIFY_IS_APPROX((c.min)(), Ones * Scalar(-2) - UnitX); + VERIFY_IS_APPROX((c.max)(), -UnitX); + + // Scaling + + AffineTransform atf = AffineTransform::Identity(); + atf.scale(Scalar(3)); + c.transform(atf); + // scale by 3 -> box((-9, -6, -6), (-3, 0, 0)) + VERIFY_IS_APPROX(c.sizes(), Scalar(3) * a.sizes()); + VERIFY_IS_APPROX((c.min)(), Ones * Scalar(-6) - UnitX * Scalar(3)); + VERIFY_IS_APPROX((c.max)(), UnitX * Scalar(-3)); + + atf = AffineTransform::Identity(); + atf.scale(Scalar(-3)); + c.transform(atf); + // scale by -3 -> box((27, 18, 18), (9, 0, 0)) + VERIFY_IS_APPROX(c.sizes(), Scalar(9) * a.sizes()); + VERIFY_IS_APPROX((c.min)(), UnitX * Scalar(9)); + VERIFY_IS_APPROX((c.max)(), Ones * Scalar(18) + UnitX * Scalar(9)); + + // Check identity transform within numerical precision. 
+ BoxType transformedC = c.transformed(IsometryTransform::Identity()); + VERIFY_IS_APPROX(transformedC, c); + + for (size_t i = 0; i < 10; ++i) + { + VectorType minCorner; + VectorType maxCorner; + for (Index d = 0; d < dim; ++d) + { + minCorner[d] = internal::random<Scalar>(-10,10); + maxCorner[d] = minCorner[d] + internal::random<Scalar>(0, 10); + } + + c = BoxType(minCorner, maxCorner); + + translate = VectorType::Random(); + c.translate(translate); + + VERIFY_IS_APPROX((c.min)(), minCorner + translate); + VERIFY_IS_APPROX((c.max)(), maxCorner + translate); + } +} + +template<typename Scalar, typename Rotation> +Rotation rotate2D(Scalar angle) { + return Rotation2D<Scalar>(angle); +} + +template<typename Scalar, typename Rotation> +Rotation rotate2DIntegral(typename NumTraits<Scalar>::NonInteger angle) { + typedef typename NumTraits<Scalar>::NonInteger NonInteger; + return Rotation2D<NonInteger>(angle).toRotationMatrix(). + template cast<Scalar>(); +} + +template<typename Scalar, typename Rotation> +Rotation rotate3DZAxis(Scalar angle) { + return AngleAxis<Scalar>(angle, Matrix<Scalar, 3, 1>(0, 0, 1)); +} + +template<typename Scalar, typename Rotation> +Rotation rotate3DZAxisIntegral(typename NumTraits<Scalar>::NonInteger angle) { + typedef typename NumTraits<Scalar>::NonInteger NonInteger; + return AngleAxis<NonInteger>(angle, Matrix<NonInteger, 3, 1>(0, 0, 1)). 
+ toRotationMatrix().template cast<Scalar>(); +} + +template<typename Scalar, typename Rotation> +Rotation rotate4DZWAxis(Scalar angle) { + Rotation result = Matrix<Scalar, 4, 4>::Identity(); + result.block(0, 0, 3, 3) = rotate3DZAxis<Scalar, AngleAxisd>(angle).toRotationMatrix(); + return result; +} + +template <typename MatrixType> +MatrixType randomRotationMatrix() +{ + // algorithm from + // https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/III-7/103/2016/isprs-annals-III-7-103-2016.pdf + const MatrixType rand = MatrixType::Random(); + const MatrixType q = rand.householderQr().householderQ(); + const JacobiSVD<MatrixType> svd = q.jacobiSvd(ComputeFullU | ComputeFullV); + const typename MatrixType::Scalar det = (svd.matrixU() * svd.matrixV().transpose()).determinant(); + MatrixType diag = rand.Identity(); + diag(MatrixType::RowsAtCompileTime - 1, MatrixType::ColsAtCompileTime - 1) = det; + const MatrixType rotation = svd.matrixU() * diag * svd.matrixV().transpose(); + return rotation; +} + +template <typename Scalar, int Dim> +Matrix<Scalar, Dim, (1<<Dim)> boxGetCorners(const Matrix<Scalar, Dim, 1>& min_, const Matrix<Scalar, Dim, 1>& max_) +{ + Matrix<Scalar, Dim, (1<<Dim) > result; + for(Index i=0; i<(1<<Dim); ++i) + { + for(Index j=0; j<Dim; ++j) + result(j,i) = (i & (1<<j)) ? 
min_(j) : max_(j); + } + return result; +} + +template<typename BoxType, typename Rotation> void alignedboxRotatable( + const BoxType& box, + Rotation (*rotate)(typename NumTraits<typename BoxType::Scalar>::NonInteger /*_angle*/)) +{ + alignedboxTranslatable(box); + + typedef typename BoxType::Scalar Scalar; + typedef typename NumTraits<Scalar>::NonInteger NonInteger; + typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType; + typedef Transform<Scalar, BoxType::AmbientDimAtCompileTime, Isometry> IsometryTransform; + typedef Transform<Scalar, BoxType::AmbientDimAtCompileTime, Affine> AffineTransform; + + const VectorType Zero = VectorType::Zero(); + const VectorType Ones = VectorType::Ones(); + const VectorType UnitX = VectorType::UnitX(); + const VectorType UnitY = VectorType::UnitY(); + // this is vector (0, 0, -1, -1, -1, ...), i.e. with zeros at first and second dimensions + const VectorType UnitZ = Ones - UnitX - UnitY; + + // in this kind of comments the 3D case values will be illustrated + // box((-1, -1, -1), (1, 1, 1)) + BoxType a(-Ones, Ones); + + // to allow templating this test for both 2D and 3D cases, we always set all + // but the first coordinate to the same value; so basically 3D case works as + // if you were looking at the scene from top + + VectorType minPoint = -2 * Ones; + minPoint[0] = -3; + VectorType maxPoint = Zero; + maxPoint[0] = -1; + BoxType c(minPoint, maxPoint); + // box((-3, -2, -2), (-1, 0, 0)) + + IsometryTransform tf2 = IsometryTransform::Identity(); + // for some weird reason the following statement has to be put separate from + // the following rotate call, otherwise precision problems arise... 
+ Rotation rot = rotate(NonInteger(EIGEN_PI)); + tf2.rotate(rot); + + c.transform(tf2); + // rotate by 180 deg around origin -> box((1, 0, -2), (3, 2, 0)) + + VERIFY_IS_APPROX(c.sizes(), a.sizes()); + VERIFY_IS_APPROX((c.min)(), UnitX - UnitZ * Scalar(2)); + VERIFY_IS_APPROX((c.max)(), UnitX * Scalar(3) + UnitY * Scalar(2)); + + rot = rotate(NonInteger(EIGEN_PI / 2)); + tf2.setIdentity(); + tf2.rotate(rot); + + c.transform(tf2); + // rotate by 90 deg around origin -> box((-2, 1, -2), (0, 3, 0)) + + VERIFY_IS_APPROX(c.sizes(), a.sizes()); + VERIFY_IS_APPROX((c.min)(), Ones * Scalar(-2) + UnitY * Scalar(3)); + VERIFY_IS_APPROX((c.max)(), UnitY * Scalar(3)); + + // box((-1, -1, -1), (1, 1, 1)) + AffineTransform atf = AffineTransform::Identity(); + atf.linearExt()(0, 1) = Scalar(1); + c = BoxType(-Ones, Ones); + c.transform(atf); + // 45 deg shear in x direction -> box((-2, -1, -1), (2, 1, 1)) + + VERIFY_IS_APPROX(c.sizes(), Ones * Scalar(2) + UnitX * Scalar(2)); + VERIFY_IS_APPROX((c.min)(), -Ones - UnitX); + VERIFY_IS_APPROX((c.max)(), Ones + UnitX); +} + +template<typename BoxType, typename Rotation> void alignedboxNonIntegralRotatable( + const BoxType& box, + Rotation (*rotate)(typename NumTraits<typename BoxType::Scalar>::NonInteger /*_angle*/)) +{ + alignedboxRotatable(box, rotate); + + typedef typename BoxType::Scalar Scalar; + typedef typename NumTraits<Scalar>::NonInteger NonInteger; + enum { Dim = BoxType::AmbientDimAtCompileTime }; + typedef Matrix<Scalar, Dim, 1> VectorType; + typedef Matrix<Scalar, Dim, (1 << Dim)> CornersType; + typedef Transform<Scalar, Dim, Isometry> IsometryTransform; + typedef Transform<Scalar, Dim, Affine> AffineTransform; + + const Index dim = box.dim(); + const VectorType Zero = VectorType::Zero(); + const VectorType Ones = VectorType::Ones(); + + VectorType minPoint = -2 * Ones; + minPoint[1] = 1; + VectorType maxPoint = Zero; + maxPoint[1] = 3; + BoxType c(minPoint, maxPoint); + // ((-2, 1, -2), (0, 3, 0)) + + VectorType cornerBL 
= (c.min)(); + VectorType cornerTR = (c.max)(); + VectorType cornerBR = (c.min)(); cornerBR[0] = cornerTR[0]; + VectorType cornerTL = (c.max)(); cornerTL[0] = cornerBL[0]; + + NonInteger angle = NonInteger(EIGEN_PI/3); + Rotation rot = rotate(angle); + IsometryTransform tf2; + tf2.setIdentity(); + tf2.rotate(rot); + + c.transform(tf2); + // rotate by 60 deg -> box((-3.59, -1.23, -2), (-0.86, 1.5, 0)) + + cornerBL = tf2 * cornerBL; + cornerBR = tf2 * cornerBR; + cornerTL = tf2 * cornerTL; + cornerTR = tf2 * cornerTR; + + VectorType minCorner = Ones * Scalar(-2); + VectorType maxCorner = Zero; + minCorner[0] = (min)((min)(cornerBL[0], cornerBR[0]), (min)(cornerTL[0], cornerTR[0])); + maxCorner[0] = (max)((max)(cornerBL[0], cornerBR[0]), (max)(cornerTL[0], cornerTR[0])); + minCorner[1] = (min)((min)(cornerBL[1], cornerBR[1]), (min)(cornerTL[1], cornerTR[1])); + maxCorner[1] = (max)((max)(cornerBL[1], cornerBR[1]), (max)(cornerTL[1], cornerTR[1])); + + for (Index d = 2; d < dim; ++d) + VERIFY_IS_APPROX(c.sizes()[d], Scalar(2)); + + VERIFY_IS_APPROX((c.min)(), minCorner); + VERIFY_IS_APPROX((c.max)(), maxCorner); + + VectorType minCornerValue = Ones * Scalar(-2); + VectorType maxCornerValue = Zero; + minCornerValue[0] = Scalar(Scalar(-sqrt(2*2 + 3*3)) * Scalar(cos(Scalar(atan(2.0/3.0)) - angle/2))); + minCornerValue[1] = Scalar(Scalar(-sqrt(1*1 + 2*2)) * Scalar(sin(Scalar(atan(2.0/1.0)) - angle/2))); + maxCornerValue[0] = Scalar(-sin(angle)); + maxCornerValue[1] = Scalar(3 * cos(angle)); + VERIFY_IS_APPROX((c.min)(), minCornerValue); + VERIFY_IS_APPROX((c.max)(), maxCornerValue); + + // randomized test - translate and rotate the box and compare to a box made of transformed vertices + for (size_t i = 0; i < 10; ++i) + { + for (Index d = 0; d < dim; ++d) + { + minCorner[d] = internal::random<Scalar>(-10,10); + maxCorner[d] = minCorner[d] + internal::random<Scalar>(0, 10); + } + + c = BoxType(minCorner, maxCorner); + + CornersType corners = boxGetCorners(minCorner, 
maxCorner); + + typename AffineTransform::LinearMatrixType rotation = + randomRotationMatrix<typename AffineTransform::LinearMatrixType>(); + tf2.setIdentity(); + tf2.rotate(rotation); + tf2.translate(VectorType::Random()); + + c.transform(tf2); + corners = tf2 * corners; + + minCorner = corners.rowwise().minCoeff(); + maxCorner = corners.rowwise().maxCoeff(); + + VERIFY_IS_APPROX((c.min)(), minCorner); + VERIFY_IS_APPROX((c.max)(), maxCorner); + } + + // randomized test - transform the box with a random affine matrix and compare to a box made of transformed vertices + for (size_t i = 0; i < 10; ++i) + { + for (Index d = 0; d < dim; ++d) + { + minCorner[d] = internal::random<Scalar>(-10,10); + maxCorner[d] = minCorner[d] + internal::random<Scalar>(0, 10); + } + + c = BoxType(minCorner, maxCorner); + + CornersType corners = boxGetCorners(minCorner, maxCorner); + + AffineTransform atf = AffineTransform::Identity(); + atf.linearExt() = AffineTransform::LinearPart::Random(); + atf.translate(VectorType::Random()); + + c.transform(atf); + corners = atf * corners; + + minCorner = corners.rowwise().minCoeff(); + maxCorner = corners.rowwise().maxCoeff(); + + VERIFY_IS_APPROX((c.min)(), minCorner); + VERIFY_IS_APPROX((c.max)(), maxCorner); + } +} template<typename BoxType> -void alignedboxCastTests(const BoxType& _box) +void alignedboxCastTests(const BoxType& box) { - // casting - typedef typename BoxType::Index Index; + // casting typedef typename BoxType::Scalar Scalar; typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType; - const Index dim = _box.dim(); + const Index dim = box.dim(); VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); @@ -162,25 +504,25 @@ void specificTest2() } -void test_geo_alignedbox() +EIGEN_DECLARE_TEST(geo_alignedbox) { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( alignedbox(AlignedBox2f()) ); + CALL_SUBTEST_1( (alignedboxNonIntegralRotatable<AlignedBox2f, Rotation2Df>(AlignedBox2f(), 
&rotate2D)) ); CALL_SUBTEST_2( alignedboxCastTests(AlignedBox2f()) ); - CALL_SUBTEST_3( alignedbox(AlignedBox3f()) ); + CALL_SUBTEST_3( (alignedboxNonIntegralRotatable<AlignedBox3f, AngleAxisf>(AlignedBox3f(), &rotate3DZAxis)) ); CALL_SUBTEST_4( alignedboxCastTests(AlignedBox3f()) ); - CALL_SUBTEST_5( alignedbox(AlignedBox4d()) ); + CALL_SUBTEST_5( (alignedboxNonIntegralRotatable<AlignedBox4d, Matrix4d>(AlignedBox4d(), &rotate4DZWAxis)) ); CALL_SUBTEST_6( alignedboxCastTests(AlignedBox4d()) ); - CALL_SUBTEST_7( alignedbox(AlignedBox1d()) ); + CALL_SUBTEST_7( alignedboxTranslatable(AlignedBox1d()) ); CALL_SUBTEST_8( alignedboxCastTests(AlignedBox1d()) ); - CALL_SUBTEST_9( alignedbox(AlignedBox1i()) ); - CALL_SUBTEST_10( alignedbox(AlignedBox2i()) ); - CALL_SUBTEST_11( alignedbox(AlignedBox3i()) ); + CALL_SUBTEST_9( alignedboxTranslatable(AlignedBox1i()) ); + CALL_SUBTEST_10( (alignedboxRotatable<AlignedBox2i, Matrix2i>(AlignedBox2i(), &rotate2DIntegral<int, Matrix2i>)) ); + CALL_SUBTEST_11( (alignedboxRotatable<AlignedBox3i, Matrix3i>(AlignedBox3i(), &rotate3DZAxisIntegral<int, Matrix3i>)) ); CALL_SUBTEST_14( alignedbox(AlignedBox<double,Dynamic>(4)) ); } diff --git a/test/geo_eulerangles.cpp b/test/geo_eulerangles.cpp index 932ebe773..693c627a9 100644 --- a/test/geo_eulerangles.cpp +++ b/test/geo_eulerangles.cpp @@ -103,7 +103,7 @@ template<typename Scalar> void eulerangles() check_all_var(ea); } -void test_geo_eulerangles() +EIGEN_DECLARE_TEST(geo_eulerangles) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eulerangles<float>() ); diff --git a/test/geo_homogeneous.cpp b/test/geo_homogeneous.cpp index 2187c7bf9..9aebe6226 100644 --- a/test/geo_homogeneous.cpp +++ b/test/geo_homogeneous.cpp @@ -115,7 +115,7 @@ template<typename Scalar,int Size> void homogeneous(void) VERIFY_IS_APPROX( (t2.template triangularView<Lower>() * v0.homogeneous()).eval(), (t2.template triangularView<Lower>()*hv0) ); } -void test_geo_homogeneous() +EIGEN_DECLARE_TEST(geo_homogeneous) 
{ for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( homogeneous<float,1>() )); diff --git a/test/geo_hyperplane.cpp b/test/geo_hyperplane.cpp index 27892850d..44b2f2aec 100644 --- a/test/geo_hyperplane.cpp +++ b/test/geo_hyperplane.cpp @@ -19,7 +19,6 @@ template<typename HyperplaneType> void hyperplane(const HyperplaneType& _plane) Hyperplane.h */ using std::abs; - typedef typename HyperplaneType::Index Index; const Index dim = _plane.dim(); enum { Options = HyperplaneType::Options }; typedef typename HyperplaneType::Scalar Scalar; @@ -118,7 +117,7 @@ template<typename Scalar> void lines() VERIFY_IS_APPROX(result, center); // check conversions between two types of lines - PLine pl(line_u); // gcc 3.3 will commit suicide if we don't name this variable + PLine pl(line_u); // gcc 3.3 will crash if we don't name this variable. HLine line_u2(pl); CoeffsType converted_coeffs = line_u2.coeffs(); if(line_u2.normal().dot(line_u.normal())<Scalar(0)) @@ -173,15 +172,10 @@ template<typename Scalar> void hyperplane_alignment() VERIFY_IS_APPROX(p1->coeffs(), p2->coeffs()); VERIFY_IS_APPROX(p1->coeffs(), p3->coeffs()); - - #if defined(EIGEN_VECTORIZE) && EIGEN_MAX_STATIC_ALIGN_BYTES > 0 - if(internal::packet_traits<Scalar>::Vectorizable && internal::packet_traits<Scalar>::size<=4) - VERIFY_RAISES_ASSERT((::new(reinterpret_cast<void*>(array3u)) Plane3a)); - #endif } -void test_geo_hyperplane() +EIGEN_DECLARE_TEST(geo_hyperplane) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( hyperplane(Hyperplane<float,2>()) ); diff --git a/test/geo_orthomethods.cpp b/test/geo_orthomethods.cpp index e178df257..b7b660740 100644 --- a/test/geo_orthomethods.cpp +++ b/test/geo_orthomethods.cpp @@ -115,7 +115,7 @@ template<typename Scalar, int Size> void orthomethods(int size=Size) VERIFY_IS_APPROX(mcrossN3.row(i), matN3.row(i).cross(vec3)); } -void test_geo_orthomethods() +EIGEN_DECLARE_TEST(geo_orthomethods) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( orthomethods_3<float>() ); 
diff --git a/test/geo_parametrizedline.cpp b/test/geo_parametrizedline.cpp index 9bf5f3c1d..e4b194abc 100644 --- a/test/geo_parametrizedline.cpp +++ b/test/geo_parametrizedline.cpp @@ -19,12 +19,13 @@ template<typename LineType> void parametrizedline(const LineType& _line) ParametrizedLine.h */ using std::abs; - typedef typename LineType::Index Index; const Index dim = _line.dim(); typedef typename LineType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar, LineType::AmbientDimAtCompileTime, 1> VectorType; typedef Hyperplane<Scalar,LineType::AmbientDimAtCompileTime> HyperplaneType; + typedef Matrix<Scalar, HyperplaneType::AmbientDimAtCompileTime, + HyperplaneType::AmbientDimAtCompileTime> MatrixType; VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); @@ -59,6 +60,31 @@ template<typename LineType> void parametrizedline(const LineType& _line) VERIFY_IS_MUCH_SMALLER_THAN(hp.signedDistance(pi), RealScalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(l0.distance(pi), RealScalar(1)); VERIFY_IS_APPROX(l0.intersectionPoint(hp), pi); + + // transform + if (!NumTraits<Scalar>::IsComplex) + { + MatrixType rot = MatrixType::Random(dim,dim).householderQr().householderQ(); + DiagonalMatrix<Scalar,LineType::AmbientDimAtCompileTime> scaling(VectorType::Random()); + Translation<Scalar,LineType::AmbientDimAtCompileTime> translation(VectorType::Random()); + + while(scaling.diagonal().cwiseAbs().minCoeff()<RealScalar(1e-4)) scaling.diagonal() = VectorType::Random(); + + LineType l1 = l0; + VectorType p3 = l0.pointAt(Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN( l1.transform(rot).distance(rot * p3), Scalar(1) ); + l1 = l0; + VERIFY_IS_MUCH_SMALLER_THAN( l1.transform(rot,Isometry).distance(rot * p3), Scalar(1) ); + l1 = l0; + VERIFY_IS_MUCH_SMALLER_THAN( l1.transform(rot*scaling).distance((rot*scaling) * p3), Scalar(1) ); + l1 = l0; + VERIFY_IS_MUCH_SMALLER_THAN( l1.transform(rot*scaling*translation) + 
.distance((rot*scaling*translation) * p3), Scalar(1) ); + l1 = l0; + VERIFY_IS_MUCH_SMALLER_THAN( l1.transform(rot*translation,Isometry) + .distance((rot*translation) * p3), Scalar(1) ); + } + } template<typename Scalar> void parametrizedline_alignment() @@ -84,14 +110,9 @@ template<typename Scalar> void parametrizedline_alignment() VERIFY_IS_APPROX(p1->origin(), p3->origin()); VERIFY_IS_APPROX(p1->direction(), p2->direction()); VERIFY_IS_APPROX(p1->direction(), p3->direction()); - - #if defined(EIGEN_VECTORIZE) && EIGEN_MAX_STATIC_ALIGN_BYTES>0 - if(internal::packet_traits<Scalar>::Vectorizable && internal::packet_traits<Scalar>::size<=4) - VERIFY_RAISES_ASSERT((::new(reinterpret_cast<void*>(array3u)) Line4a)); - #endif } -void test_geo_parametrizedline() +EIGEN_DECLARE_TEST(geo_parametrizedline) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( parametrizedline(ParametrizedLine<float,2>()) ); diff --git a/test/geo_quaternion.cpp b/test/geo_quaternion.cpp index 96889e722..c561fc89d 100644 --- a/test/geo_quaternion.cpp +++ b/test/geo_quaternion.cpp @@ -12,6 +12,7 @@ #include <Eigen/Geometry> #include <Eigen/LU> #include <Eigen/SVD> +#include "AnnoyingScalar.h" template<typename T> T bounded_acos(T v) { @@ -74,6 +75,13 @@ template<typename Scalar, int Options> void quaternion(void) q1.coeffs().setRandom(); VERIFY_IS_APPROX(q1.coeffs(), (q1*q2).coeffs()); +#ifndef EIGEN_NO_IO + // Printing + std::ostringstream ss; + ss << q2; + VERIFY(ss.str() == "0i + 0j + 0k + 1"); +#endif + // concatenation q1 *= q2; @@ -85,7 +93,7 @@ template<typename Scalar, int Options> void quaternion(void) if (refangle>Scalar(EIGEN_PI)) refangle = Scalar(2)*Scalar(EIGEN_PI) - refangle; - if((q1.coeffs()-q2.coeffs()).norm() > 10*largeEps) + if((q1.coeffs()-q2.coeffs()).norm() > Scalar(10)*largeEps) { VERIFY_IS_MUCH_SMALLER_THAN(abs(q1.angularDistance(q2) - refangle), Scalar(1)); } @@ -113,7 +121,7 @@ template<typename Scalar, int Options> void quaternion(void) // Do not execute the test if 
the rotation angle is almost zero, or // the rotation axis and v1 are almost parallel. - if (abs(aa.angle()) > 5*test_precision<Scalar>() + if (abs(aa.angle()) > Scalar(5)*test_precision<Scalar>() && (aa.axis() - v1.normalized()).norm() < Scalar(1.99) && (aa.axis() + v1.normalized()).norm() < Scalar(1.99)) { @@ -210,10 +218,6 @@ template<typename Scalar> void mapQuaternion(void){ VERIFY_IS_APPROX(q1.coeffs(), q2.coeffs()); VERIFY_IS_APPROX(q1.coeffs(), q3.coeffs()); VERIFY_IS_APPROX(q4.coeffs(), q3.coeffs()); - #ifdef EIGEN_VECTORIZE - if(internal::packet_traits<Scalar>::Vectorizable) - VERIFY_RAISES_ASSERT((MQuaternionA(array3unaligned))); - #endif VERIFY_IS_APPROX(mq1 * (mq1.inverse() * v1), v1); VERIFY_IS_APPROX(mq1 * (mq1.conjugate() * v1), v1); @@ -231,6 +235,27 @@ template<typename Scalar> void mapQuaternion(void){ VERIFY_IS_APPROX(mq3*mq2, q3*q2); VERIFY_IS_APPROX(mcq1*mq2, q1*q2); VERIFY_IS_APPROX(mcq3*mq2, q3*q2); + + // Bug 1461, compilation issue with Map<const Quat>::w(), and other reference/constness checks: + VERIFY_IS_APPROX(mcq3.coeffs().x() + mcq3.coeffs().y() + mcq3.coeffs().z() + mcq3.coeffs().w(), mcq3.coeffs().sum()); + VERIFY_IS_APPROX(mcq3.x() + mcq3.y() + mcq3.z() + mcq3.w(), mcq3.coeffs().sum()); + mq3.w() = 1; + const Quaternionx& cq3(q3); + VERIFY( &cq3.x() == &q3.x() ); + const MQuaternionUA& cmq3(mq3); + VERIFY( &cmq3.x() == &mq3.x() ); + // FIXME the following should be ok. The problem is that currently the LValueBit flag + // is used to determine whether we can return a coeff by reference or not, which is not enough for Map<const ...>. 
+ //const MCQuaternionUA& cmcq3(mcq3); + //VERIFY( &cmcq3.x() == &mcq3.x() ); + + // test cast + { + Quaternion<float> q1f = mq1.template cast<float>(); + VERIFY_IS_APPROX(q1f.template cast<Scalar>(),mq1); + Quaternion<double> q1d = mq1.template cast<double>(); + VERIFY_IS_APPROX(q1d.template cast<Scalar>(),mq1); + } } template<typename Scalar> void quaternionAlignment(void){ @@ -252,10 +277,6 @@ template<typename Scalar> void quaternionAlignment(void){ VERIFY_IS_APPROX(q1->coeffs(), q2->coeffs()); VERIFY_IS_APPROX(q1->coeffs(), q3->coeffs()); - #if defined(EIGEN_VECTORIZE) && EIGEN_MAX_STATIC_ALIGN_BYTES>0 - if(internal::packet_traits<Scalar>::Vectorizable && internal::packet_traits<Scalar>::size<=4) - VERIFY_RAISES_ASSERT((::new(reinterpret_cast<void*>(arrayunaligned)) QuaternionA)); - #endif } template<typename PlainObjectType> void check_const_correctness(const PlainObjectType&) @@ -272,18 +293,40 @@ template<typename PlainObjectType> void check_const_correctness(const PlainObjec VERIFY( !(Map<ConstPlainObjectType, Aligned>::Flags & LvalueBit) ); } -void test_geo_quaternion() +#if EIGEN_HAS_RVALUE_REFERENCES + +// Regression for bug 1573 +struct MovableClass { + // The following line is a workaround for gcc 4.7 and 4.8 (see bug 1573 comments). 
+ static_assert(std::is_nothrow_move_constructible<Quaternionf>::value,""); + MovableClass() = default; + MovableClass(const MovableClass&) = default; + MovableClass(MovableClass&&) noexcept = default; + MovableClass& operator=(const MovableClass&) = default; + MovableClass& operator=(MovableClass&&) = default; + Quaternionf m_quat; +}; + +#endif + +EIGEN_DECLARE_TEST(geo_quaternion) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( quaternion<float,AutoAlign>() )); CALL_SUBTEST_1( check_const_correctness(Quaternionf()) ); + CALL_SUBTEST_1(( quaternion<float,DontAlign>() )); + CALL_SUBTEST_1(( quaternionAlignment<float>() )); + CALL_SUBTEST_1( mapQuaternion<float>() ); + CALL_SUBTEST_2(( quaternion<double,AutoAlign>() )); CALL_SUBTEST_2( check_const_correctness(Quaterniond()) ); - CALL_SUBTEST_3(( quaternion<float,DontAlign>() )); - CALL_SUBTEST_4(( quaternion<double,DontAlign>() )); - CALL_SUBTEST_5(( quaternionAlignment<float>() )); - CALL_SUBTEST_6(( quaternionAlignment<double>() )); - CALL_SUBTEST_1( mapQuaternion<float>() ); + CALL_SUBTEST_2(( quaternion<double,DontAlign>() )); + CALL_SUBTEST_2(( quaternionAlignment<double>() )); CALL_SUBTEST_2( mapQuaternion<double>() ); + +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW + AnnoyingScalar::dont_throw = true; +#endif + CALL_SUBTEST_3(( quaternion<AnnoyingScalar,AutoAlign>() )); } } diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp index 278e527c2..72c6edac1 100755..100644 --- a/test/geo_transformations.cpp +++ b/test/geo_transformations.cpp @@ -582,11 +582,6 @@ template<typename Scalar> void transform_alignment() VERIFY_IS_APPROX(p1->matrix(), p3->matrix()); VERIFY_IS_APPROX( (*p1) * (*p1), (*p2)*(*p3)); - - #if defined(EIGEN_VECTORIZE) && EIGEN_MAX_STATIC_ALIGN_BYTES>0 - if(internal::packet_traits<Scalar>::Vectorizable) - VERIFY_RAISES_ASSERT((::new(reinterpret_cast<void*>(array3u)) Projective3a)); - #endif } template<typename Scalar, int Dim, int Options> void transform_products() @@ 
-612,11 +607,99 @@ template<typename Scalar, int Dim, int Options> void transform_products() VERIFY_IS_APPROX((ac*p).matrix(), a_m*p_m); } -void test_geo_transformations() +template<typename Scalar, int Mode, int Options> void transformations_no_scale() +{ + /* this test covers the following files: + Cross.h Quaternion.h, Transform.h + */ + typedef Matrix<Scalar,3,1> Vector3; + typedef Matrix<Scalar,4,1> Vector4; + typedef Quaternion<Scalar> Quaternionx; + typedef AngleAxis<Scalar> AngleAxisx; + typedef Transform<Scalar,3,Mode,Options> Transform3; + typedef Translation<Scalar,3> Translation3; + typedef Matrix<Scalar,4,4> Matrix4; + + Vector3 v0 = Vector3::Random(), + v1 = Vector3::Random(); + + Transform3 t0, t1, t2; + + Scalar a = internal::random<Scalar>(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); + + Quaternionx q1, q2; + + q1 = AngleAxisx(a, v0.normalized()); + + t0 = Transform3::Identity(); + VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity()); + + t0.setIdentity(); + t1.setIdentity(); + v1 = Vector3::Ones(); + t0.linear() = q1.toRotationMatrix(); + t0.pretranslate(v0); + t1.linear() = q1.conjugate().toRotationMatrix(); + t1.translate(-v0); + + VERIFY((t0 * t1).matrix().isIdentity(test_precision<Scalar>())); + + t1.fromPositionOrientationScale(v0, q1, v1); + VERIFY_IS_APPROX(t1.matrix(), t0.matrix()); + VERIFY_IS_APPROX(t1*v1, t0*v1); + + // translation * vector + t0.setIdentity(); + t0.translate(v0); + VERIFY_IS_APPROX((t0 * v1).template head<3>(), Translation3(v0) * v1); + + // Conversion to matrix. + Transform3 t3; + t3.linear() = q1.toRotationMatrix(); + t3.translation() = v1; + Matrix4 m3 = t3.matrix(); + VERIFY((m3 * m3.inverse()).isIdentity(test_precision<Scalar>())); + // Verify implicit last row is initialized. 
+ VERIFY_IS_APPROX(Vector4(m3.row(3)), Vector4(0.0, 0.0, 0.0, 1.0)); + + VERIFY_IS_APPROX(t3.rotation(), t3.linear()); + if(Mode==Isometry) + VERIFY(t3.rotation().data()==t3.linear().data()); +} + +template<typename Scalar, int Mode, int Options> void transformations_computed_scaling_continuity() +{ + typedef Matrix<Scalar, 3, 1> Vector3; + typedef Transform<Scalar, 3, Mode, Options> Transform3; + typedef Matrix<Scalar, 3, 3> Matrix3; + + // Given: two transforms that differ by '2*eps'. + Scalar eps(1e-3); + Vector3 v0 = Vector3::Random().normalized(), + v1 = Vector3::Random().normalized(), + v3 = Vector3::Random().normalized(); + Transform3 t0, t1; + // The interesting case is when their determinants have different signs. + Matrix3 rank2 = 50 * v0 * v0.adjoint() + 20 * v1 * v1.adjoint(); + t0.linear() = rank2 + eps * v3 * v3.adjoint(); + t1.linear() = rank2 - eps * v3 * v3.adjoint(); + + // When: computing the rotation-scaling parts + Matrix3 r0, s0, r1, s1; + t0.computeRotationScaling(&r0, &s0); + t1.computeRotationScaling(&r1, &s1); + + // Then: the scaling parts should differ by no more than '2*eps'. 
+ const Scalar c(2.1); // 2 + room for rounding errors + VERIFY((s0 - s1).norm() < c * eps); +} + +EIGEN_DECLARE_TEST(geo_transformations) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( transformations<double,Affine,AutoAlign>() )); CALL_SUBTEST_1(( non_projective_only<double,Affine,AutoAlign>() )); + CALL_SUBTEST_1(( transformations_computed_scaling_continuity<double,Affine,AutoAlign>() )); CALL_SUBTEST_2(( transformations<float,AffineCompact,AutoAlign>() )); CALL_SUBTEST_2(( non_projective_only<float,AffineCompact,AutoAlign>() )); @@ -625,7 +708,7 @@ void test_geo_transformations() CALL_SUBTEST_3(( transformations<double,Projective,AutoAlign>() )); CALL_SUBTEST_3(( transformations<double,Projective,DontAlign>() )); CALL_SUBTEST_3(( transform_alignment<double>() )); - + CALL_SUBTEST_4(( transformations<float,Affine,RowMajor|AutoAlign>() )); CALL_SUBTEST_4(( non_projective_only<float,Affine,RowMajor>() )); @@ -641,5 +724,8 @@ void test_geo_transformations() CALL_SUBTEST_8(( transform_associativity<double,2,ColMajor>(Rotation2D<double>(internal::random<double>()*double(EIGEN_PI))) )); CALL_SUBTEST_8(( transform_associativity<double,3,ColMajor>(Quaterniond::UnitRandom()) )); + + CALL_SUBTEST_9(( transformations_no_scale<double,Affine,AutoAlign>() )); + CALL_SUBTEST_9(( transformations_no_scale<double,Isometry,AutoAlign>() )); } } diff --git a/test/gpu_basic.cu b/test/gpu_basic.cu new file mode 100644 index 000000000..4298da3bb --- /dev/null +++ b/test/gpu_basic.cu @@ -0,0 +1,461 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015-2016 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// workaround issue between gcc >= 4.7 and cuda 5.5 +#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7) + #undef _GLIBCXX_ATOMIC_BUILTINS + #undef _GLIBCXX_USE_INT128 +#endif + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int + +#include "main.h" +#include "gpu_common.h" + +// Check that dense modules can be properly parsed by nvcc +#include <Eigen/Dense> + +// struct Foo{ +// EIGEN_DEVICE_FUNC +// void operator()(int i, const float* mats, float* vecs) const { +// using namespace Eigen; +// // Matrix3f M(data); +// // Vector3f x(data+9); +// // Map<Vector3f>(data+9) = M.inverse() * x; +// Matrix3f M(mats+i/16); +// Vector3f x(vecs+i*3); +// // using std::min; +// // using std::sqrt; +// Map<Vector3f>(vecs+i*3) << x.minCoeff(), 1, 2;// / x.dot(x);//(M.inverse() * x) / x.x(); +// //x = x*2 + x.y() * x + x * x.maxCoeff() - x / x.sum(); +// } +// }; + +template<typename T> +struct coeff_wise { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + T x1(in+i); + T x2(in+i+1); + T x3(in+i+2); + Map<T> res(out+i*T::MaxSizeAtCompileTime); + + res.array() += (in[0] * x1 + x2).array() * x3.array(); + } +}; + +template<typename T> +struct complex_sqrt { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef typename T::Scalar ComplexType; + typedef typename T::Scalar::value_type ValueType; + const int num_special_inputs = 18; + + if (i == 0) { + const ValueType nan = std::numeric_limits<ValueType>::quiet_NaN(); + typedef Eigen::Vector<ComplexType, num_special_inputs> SpecialInputs; + SpecialInputs special_in; + special_in.setZero(); + int idx = 0; + special_in[idx++] = ComplexType(0, 0); + special_in[idx++] = ComplexType(-0, 0); + special_in[idx++] = ComplexType(0, -0); + special_in[idx++] = ComplexType(-0, -0); + // GCC's fallback sqrt implementation fails for inf 
inputs. + // It is called when _GLIBCXX_USE_C99_COMPLEX is false or if + // clang includes the GCC header (which temporarily disables + // _GLIBCXX_USE_C99_COMPLEX) + #if !defined(_GLIBCXX_COMPLEX) || \ + (_GLIBCXX_USE_C99_COMPLEX && !defined(__CLANG_CUDA_WRAPPERS_COMPLEX)) + const ValueType inf = std::numeric_limits<ValueType>::infinity(); + special_in[idx++] = ComplexType(1.0, inf); + special_in[idx++] = ComplexType(nan, inf); + special_in[idx++] = ComplexType(1.0, -inf); + special_in[idx++] = ComplexType(nan, -inf); + special_in[idx++] = ComplexType(-inf, 1.0); + special_in[idx++] = ComplexType(inf, 1.0); + special_in[idx++] = ComplexType(-inf, -1.0); + special_in[idx++] = ComplexType(inf, -1.0); + special_in[idx++] = ComplexType(-inf, nan); + special_in[idx++] = ComplexType(inf, nan); + #endif + special_in[idx++] = ComplexType(1.0, nan); + special_in[idx++] = ComplexType(nan, 1.0); + special_in[idx++] = ComplexType(nan, -1.0); + special_in[idx++] = ComplexType(nan, nan); + + Map<SpecialInputs> special_out(out); + special_out = special_in.cwiseSqrt(); + } + + T x1(in + i); + Map<T> res(out + num_special_inputs + i*T::MaxSizeAtCompileTime); + res = x1.cwiseSqrt(); + } +}; + +template<typename T> +struct complex_operators { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef typename T::Scalar ComplexType; + typedef typename T::Scalar::value_type ValueType; + const int num_scalar_operators = 24; + const int num_vector_operators = 23; // no unary + operator. + int out_idx = i * (num_scalar_operators + num_vector_operators * T::MaxSizeAtCompileTime); + + // Scalar operators. 
+ const ComplexType a = in[i]; + const ComplexType b = in[i + 1]; + + out[out_idx++] = +a; + out[out_idx++] = -a; + + out[out_idx++] = a + b; + out[out_idx++] = a + numext::real(b); + out[out_idx++] = numext::real(a) + b; + out[out_idx++] = a - b; + out[out_idx++] = a - numext::real(b); + out[out_idx++] = numext::real(a) - b; + out[out_idx++] = a * b; + out[out_idx++] = a * numext::real(b); + out[out_idx++] = numext::real(a) * b; + out[out_idx++] = a / b; + out[out_idx++] = a / numext::real(b); + out[out_idx++] = numext::real(a) / b; + + out[out_idx] = a; out[out_idx++] += b; + out[out_idx] = a; out[out_idx++] -= b; + out[out_idx] = a; out[out_idx++] *= b; + out[out_idx] = a; out[out_idx++] /= b; + + const ComplexType true_value = ComplexType(ValueType(1), ValueType(0)); + const ComplexType false_value = ComplexType(ValueType(0), ValueType(0)); + out[out_idx++] = (a == b ? true_value : false_value); + out[out_idx++] = (a == numext::real(b) ? true_value : false_value); + out[out_idx++] = (numext::real(a) == b ? true_value : false_value); + out[out_idx++] = (a != b ? true_value : false_value); + out[out_idx++] = (a != numext::real(b) ? true_value : false_value); + out[out_idx++] = (numext::real(a) != b ? true_value : false_value); + + // Vector versions. 
+ T x1(in + i); + T x2(in + i + 1); + const int res_size = T::MaxSizeAtCompileTime * num_scalar_operators; + const int size = T::MaxSizeAtCompileTime; + int block_idx = 0; + + Map<VectorX<ComplexType>> res(out + out_idx, res_size); + res.segment(block_idx, size) = -x1; + block_idx += size; + + res.segment(block_idx, size) = x1 + x2; + block_idx += size; + res.segment(block_idx, size) = x1 + x2.real(); + block_idx += size; + res.segment(block_idx, size) = x1.real() + x2; + block_idx += size; + res.segment(block_idx, size) = x1 - x2; + block_idx += size; + res.segment(block_idx, size) = x1 - x2.real(); + block_idx += size; + res.segment(block_idx, size) = x1.real() - x2; + block_idx += size; + res.segment(block_idx, size) = x1.array() * x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1.array() * x2.real().array(); + block_idx += size; + res.segment(block_idx, size) = x1.real().array() * x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1.array() / x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1.array() / x2.real().array(); + block_idx += size; + res.segment(block_idx, size) = x1.real().array() / x2.array(); + block_idx += size; + + res.segment(block_idx, size) = x1; res.segment(block_idx, size) += x2; + block_idx += size; + res.segment(block_idx, size) = x1; res.segment(block_idx, size) -= x2; + block_idx += size; + res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() *= x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() /= x2.array(); + block_idx += size; + + const T true_vector = T::Constant(true_value); + const T false_vector = T::Constant(false_value); + res.segment(block_idx, size) = (x1 == x2 ? true_vector : false_vector); + block_idx += size; + // Mixing types in equality comparison does not work. + // res.segment(block_idx, size) = (x1 == x2.real() ? 
true_vector : false_vector); + // block_idx += size; + // res.segment(block_idx, size) = (x1.real() == x2 ? true_vector : false_vector); + // block_idx += size; + res.segment(block_idx, size) = (x1 != x2 ? true_vector : false_vector); + block_idx += size; + // res.segment(block_idx, size) = (x1 != x2.real() ? true_vector : false_vector); + // block_idx += size; + // res.segment(block_idx, size) = (x1.real() != x2 ? true_vector : false_vector); + // block_idx += size; + } +}; + +template<typename T> +struct replicate { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + T x1(in+i); + int step = x1.size() * 4; + int stride = 3 * step; + + typedef Map<Array<typename T::Scalar,Dynamic,Dynamic> > MapType; + MapType(out+i*stride+0*step, x1.rows()*2, x1.cols()*2) = x1.replicate(2,2); + MapType(out+i*stride+1*step, x1.rows()*3, x1.cols()) = in[i] * x1.colwise().replicate(3); + MapType(out+i*stride+2*step, x1.rows(), x1.cols()*3) = in[i] * x1.rowwise().replicate(3); + } +}; + +template<typename T> +struct alloc_new_delete { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + int offset = 2*i*T::MaxSizeAtCompileTime; + T* x = new T(in + offset); + Eigen::Map<T> u(out + offset); + u = *x; + delete x; + + offset += T::MaxSizeAtCompileTime; + T* y = new T[1]; + y[0] = T(in + offset); + Eigen::Map<T> v(out + offset); + v = y[0]; + delete[] y; + } +}; + +template<typename T> +struct redux { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + int N = 10; + T x1(in+i); + out[i*N+0] = x1.minCoeff(); + out[i*N+1] = x1.maxCoeff(); + out[i*N+2] = x1.sum(); + out[i*N+3] = x1.prod(); + out[i*N+4] = x1.matrix().squaredNorm(); + out[i*N+5] = x1.matrix().norm(); + out[i*N+6] = x1.colwise().sum().maxCoeff(); + out[i*N+7] = x1.rowwise().maxCoeff().sum(); + out[i*N+8] 
= x1.matrix().colwise().squaredNorm().sum(); + } +}; + +template<typename T1, typename T2> +struct prod_test { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const + { + using namespace Eigen; + typedef Matrix<typename T1::Scalar, T1::RowsAtCompileTime, T2::ColsAtCompileTime> T3; + T1 x1(in+i); + T2 x2(in+i+1); + Map<T3> res(out+i*T3::MaxSizeAtCompileTime); + res += in[i] * x1 * x2; + } +}; + +template<typename T1, typename T2> +struct diagonal { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const + { + using namespace Eigen; + T1 x1(in+i); + Map<T2> res(out+i*T2::MaxSizeAtCompileTime); + res += x1.diagonal(); + } +}; + +template<typename T> +struct eigenvalues_direct { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec; + T M(in+i); + Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime); + T A = M*M.adjoint(); + SelfAdjointEigenSolver<T> eig; + eig.computeDirect(A); + res = eig.eigenvalues(); + } +}; + +template<typename T> +struct eigenvalues { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec; + T M(in+i); + Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime); + T A = M*M.adjoint(); + SelfAdjointEigenSolver<T> eig; + eig.compute(A); + res = eig.eigenvalues(); + } +}; + +template<typename T> +struct matrix_inverse { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + T M(in+i); + Map<T> res(out+i*T::MaxSizeAtCompileTime); + res = M.inverse(); + } +}; + +template<typename T> +struct numeric_limits_test { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename 
T::Scalar* out) const + { + EIGEN_UNUSED_VARIABLE(in) + int out_idx = i * 5; + out[out_idx++] = numext::numeric_limits<float>::epsilon(); + out[out_idx++] = (numext::numeric_limits<float>::max)(); + out[out_idx++] = (numext::numeric_limits<float>::min)(); + out[out_idx++] = numext::numeric_limits<float>::infinity(); + out[out_idx++] = numext::numeric_limits<float>::quiet_NaN(); + } +}; + +template<typename Type1, typename Type2> +bool verifyIsApproxWithInfsNans(const Type1& a, const Type2& b, typename Type1::Scalar* = 0) // Enabled for Eigen's type only +{ + if (a.rows() != b.rows()) { + return false; + } + if (a.cols() != b.cols()) { + return false; + } + for (Index r = 0; r < a.rows(); ++r) { + for (Index c = 0; c < a.cols(); ++c) { + if (a(r, c) != b(r, c) + && !((numext::isnan)(a(r, c)) && (numext::isnan)(b(r, c))) + && !test_isApprox(a(r, c), b(r, c))) { + return false; + } + } + } + return true; +} + +template<typename Kernel, typename Input, typename Output> +void test_with_infs_nans(const Kernel& ker, int n, const Input& in, Output& out) +{ + Output out_ref, out_gpu; + #if !defined(EIGEN_GPU_COMPILE_PHASE) + out_ref = out_gpu = out; + #else + EIGEN_UNUSED_VARIABLE(in); + EIGEN_UNUSED_VARIABLE(out); + #endif + run_on_cpu (ker, n, in, out_ref); + run_on_gpu(ker, n, in, out_gpu); + #if !defined(EIGEN_GPU_COMPILE_PHASE) + verifyIsApproxWithInfsNans(out_ref, out_gpu); + #endif +} + +EIGEN_DECLARE_TEST(gpu_basic) +{ + ei_test_init_gpu(); + + int nthreads = 100; + Eigen::VectorXf in, out; + Eigen::VectorXcf cfin, cfout; + + #if !defined(EIGEN_GPU_COMPILE_PHASE) + int data_size = nthreads * 512; + in.setRandom(data_size); + out.setConstant(data_size, -1); + cfin.setRandom(data_size); + cfout.setConstant(data_size, -1); + #endif + + CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Vector3f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Array44f>(), nthreads, in, out) ); + +#if !defined(EIGEN_USE_HIP) + // FIXME + // These subtests result 
in a compile failure on the HIP platform + // + // eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error: + // base class 'internal::dense_xpr_base<Replicate<Array<float, 4, 1, 0, 4, 1>, -1, -1> >::type' + // (aka 'ArrayBase<Eigen::Replicate<Eigen::Array<float, 4, 1, 0, 4, 1>, -1, -1> >') has protected default constructor + CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array4f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array33f>(), nthreads, in, out) ); + + // HIP does not support new/delete on device. + CALL_SUBTEST( run_and_compare_to_gpu(alloc_new_delete<Vector3f>(), nthreads, in, out) ); +#endif + + CALL_SUBTEST( run_and_compare_to_gpu(redux<Array4f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(redux<Matrix3f>(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix2f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix3f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix4f>(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix3f>(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix2f>(), nthreads, in, out) ); + + // Test std::complex. 
+ CALL_SUBTEST( run_and_compare_to_gpu(complex_operators<Vector3cf>(), nthreads, cfin, cfout) ); + CALL_SUBTEST( test_with_infs_nans(complex_sqrt<Vector3cf>(), nthreads, cfin, cfout) ); + + // numeric_limits + CALL_SUBTEST( test_with_infs_nans(numeric_limits_test<Vector3f>(), 1, in, out) ); + +#if defined(__NVCC__) + // FIXME + // These subtests compiles only with nvcc and fail with HIPCC and clang-cuda + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix4f>(), nthreads, in, out) ); + typedef Matrix<float,6,6> Matrix6f; + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix6f>(), nthreads, in, out) ); +#endif +} diff --git a/test/gpu_common.h b/test/gpu_common.h new file mode 100644 index 000000000..c37eaa13f --- /dev/null +++ b/test/gpu_common.h @@ -0,0 +1,176 @@ +#ifndef EIGEN_TEST_GPU_COMMON_H +#define EIGEN_TEST_GPU_COMMON_H + +#ifdef EIGEN_USE_HIP + #include <hip/hip_runtime.h> + #include <hip/hip_runtime_api.h> +#else + #include <cuda.h> + #include <cuda_runtime.h> + #include <cuda_runtime_api.h> +#endif + +#include <iostream> + +#define EIGEN_USE_GPU +#include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h> + +#if !defined(__CUDACC__) && !defined(__HIPCC__) +dim3 threadIdx, blockDim, blockIdx; +#endif + +template<typename Kernel, typename Input, typename Output> +void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out) +{ + for(int i=0; i<n; i++) + ker(i, in.data(), out.data()); +} + + +template<typename Kernel, typename Input, typename Output> +__global__ +EIGEN_HIP_LAUNCH_BOUNDS_1024 +void run_on_gpu_meta_kernel(const Kernel ker, int n, const Input* in, Output* out) +{ + int i = threadIdx.x + blockIdx.x*blockDim.x; + if(i<n) { + ker(i, in, out); + } +} + + +template<typename Kernel, typename Input, typename Output> +void run_on_gpu(const Kernel& ker, int n, const Input& in, Output& out) +{ + typename Input::Scalar* d_in; + typename Output::Scalar* d_out; + std::ptrdiff_t in_bytes = in.size() * sizeof(typename 
Input::Scalar); + std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar); + + gpuMalloc((void**)(&d_in), in_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice); + + // Simple and non-optimal 1D mapping assuming n is not too large + // That's only for unit testing! + dim3 Blocks(128); + dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) ); + + gpuDeviceSynchronize(); + +#ifdef EIGEN_USE_HIP + hipLaunchKernelGGL(HIP_KERNEL_NAME(run_on_gpu_meta_kernel<Kernel, + typename std::decay<decltype(*d_in)>::type, + typename std::decay<decltype(*d_out)>::type>), + dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out); +#else + run_on_gpu_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out); +#endif + // Pre-launch errors. + gpuError_t err = gpuGetLastError(); + if (err != gpuSuccess) { + printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); + gpu_assert(false); + } + + // Kernel execution errors. 
+ err = gpuDeviceSynchronize(); + if (err != gpuSuccess) { + printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); + gpu_assert(false); + } + + + // check inputs have not been modified + gpuMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost); + gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost); + + gpuFree(d_in); + gpuFree(d_out); +} + + +template<typename Kernel, typename Input, typename Output> +void run_and_compare_to_gpu(const Kernel& ker, int n, const Input& in, Output& out) +{ + Input in_ref, in_gpu; + Output out_ref, out_gpu; + #if !defined(EIGEN_GPU_COMPILE_PHASE) + in_ref = in_gpu = in; + out_ref = out_gpu = out; + #else + EIGEN_UNUSED_VARIABLE(in); + EIGEN_UNUSED_VARIABLE(out); + #endif + run_on_cpu (ker, n, in_ref, out_ref); + run_on_gpu(ker, n, in_gpu, out_gpu); + #if !defined(EIGEN_GPU_COMPILE_PHASE) + VERIFY_IS_APPROX(in_ref, in_gpu); + VERIFY_IS_APPROX(out_ref, out_gpu); + #endif +} + +struct compile_time_device_info { + EIGEN_DEVICE_FUNC + void operator()(int i, const int* /*in*/, int* info) const + { + if (i == 0) { + EIGEN_UNUSED_VARIABLE(info) + #if defined(__CUDA_ARCH__) + info[0] = int(__CUDA_ARCH__ +0); + #endif + #if defined(EIGEN_HIP_DEVICE_COMPILE) + info[1] = int(EIGEN_HIP_DEVICE_COMPILE +0); + #endif + } + } +}; + +void ei_test_init_gpu() +{ + int device = 0; + gpuDeviceProp_t deviceProp; + gpuGetDeviceProperties(&deviceProp, device); + + ArrayXi dummy(1), info(10); + info = -1; + run_on_gpu(compile_time_device_info(),10,dummy,info); + + + std::cout << "GPU compile-time info:\n"; + + #ifdef EIGEN_CUDACC + std::cout << " EIGEN_CUDACC: " << int(EIGEN_CUDACC) << "\n"; + #endif + + #ifdef EIGEN_CUDA_SDK_VER + std::cout << " EIGEN_CUDA_SDK_VER: " << int(EIGEN_CUDA_SDK_VER) << "\n"; + #endif + + #ifdef EIGEN_COMP_NVCC + std::cout << " EIGEN_COMP_NVCC: " << int(EIGEN_COMP_NVCC) << "\n"; + #endif + + #ifdef EIGEN_HIPCC + std::cout << " EIGEN_HIPCC: " << int(EIGEN_HIPCC) << "\n"; + 
#endif + + std::cout << " EIGEN_CUDA_ARCH: " << info[0] << "\n"; + std::cout << " EIGEN_HIP_DEVICE_COMPILE: " << info[1] << "\n"; + + std::cout << "GPU device info:\n"; + std::cout << " name: " << deviceProp.name << "\n"; + std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n"; + std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n"; + std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n"; + std::cout << " warpSize: " << deviceProp.warpSize << "\n"; + std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n"; + std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n"; + std::cout << " clockRate: " << deviceProp.clockRate << "\n"; + std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n"; + std::cout << " computeMode: " << deviceProp.computeMode << "\n"; +} + +#endif // EIGEN_TEST_GPU_COMMON_H diff --git a/test/half_float.cpp b/test/half_float.cpp index 3d2410aef..729de1bc7 100644 --- a/test/half_float.cpp +++ b/test/half_float.cpp @@ -9,7 +9,10 @@ #include "main.h" -#include <Eigen/src/Core/arch/CUDA/Half.h> +#include <Eigen/src/Core/arch/Default/Half.h> + +#define VERIFY_HALF_BITS_EQUAL(h, bits) \ + VERIFY_IS_EQUAL((numext::bit_cast<numext::uint16_t>(h)), (static_cast<numext::uint16_t>(bits))) // Make sure it's possible to forward declare Eigen::half namespace Eigen { @@ -20,56 +23,70 @@ using Eigen::half; void test_conversion() { - using Eigen::half_impl::__half; + using Eigen::half_impl::__half_raw; + + // Round-trip bit-cast with uint16. 
+ VERIFY_IS_EQUAL( + numext::bit_cast<half>(numext::bit_cast<numext::uint16_t>(half(1.0f))), + half(1.0f)); + VERIFY_IS_EQUAL( + numext::bit_cast<half>(numext::bit_cast<numext::uint16_t>(half(0.5f))), + half(0.5f)); + VERIFY_IS_EQUAL( + numext::bit_cast<half>(numext::bit_cast<numext::uint16_t>(half(-0.33333f))), + half(-0.33333f)); + VERIFY_IS_EQUAL( + numext::bit_cast<half>(numext::bit_cast<numext::uint16_t>(half(0.0f))), + half(0.0f)); // Conversion from float. - VERIFY_IS_EQUAL(half(1.0f).x, 0x3c00); - VERIFY_IS_EQUAL(half(0.5f).x, 0x3800); - VERIFY_IS_EQUAL(half(0.33333f).x, 0x3555); - VERIFY_IS_EQUAL(half(0.0f).x, 0x0000); - VERIFY_IS_EQUAL(half(-0.0f).x, 0x8000); - VERIFY_IS_EQUAL(half(65504.0f).x, 0x7bff); - VERIFY_IS_EQUAL(half(65536.0f).x, 0x7c00); // Becomes infinity. + VERIFY_HALF_BITS_EQUAL(half(1.0f), 0x3c00); + VERIFY_HALF_BITS_EQUAL(half(0.5f), 0x3800); + VERIFY_HALF_BITS_EQUAL(half(0.33333f), 0x3555); + VERIFY_HALF_BITS_EQUAL(half(0.0f), 0x0000); + VERIFY_HALF_BITS_EQUAL(half(-0.0f), 0x8000); + VERIFY_HALF_BITS_EQUAL(half(65504.0f), 0x7bff); + VERIFY_HALF_BITS_EQUAL(half(65536.0f), 0x7c00); // Becomes infinity. // Denormals. - VERIFY_IS_EQUAL(half(-5.96046e-08f).x, 0x8001); - VERIFY_IS_EQUAL(half(5.96046e-08f).x, 0x0001); - VERIFY_IS_EQUAL(half(1.19209e-07f).x, 0x0002); + VERIFY_HALF_BITS_EQUAL(half(-5.96046e-08f), 0x8001); + VERIFY_HALF_BITS_EQUAL(half(5.96046e-08f), 0x0001); + VERIFY_HALF_BITS_EQUAL(half(1.19209e-07f), 0x0002); // Verify round-to-nearest-even behavior. 
- float val1 = float(half(__half(0x3c00))); - float val2 = float(half(__half(0x3c01))); - float val3 = float(half(__half(0x3c02))); - VERIFY_IS_EQUAL(half(0.5f * (val1 + val2)).x, 0x3c00); - VERIFY_IS_EQUAL(half(0.5f * (val2 + val3)).x, 0x3c02); + float val1 = float(half(__half_raw(0x3c00))); + float val2 = float(half(__half_raw(0x3c01))); + float val3 = float(half(__half_raw(0x3c02))); + VERIFY_HALF_BITS_EQUAL(half(0.5f * (val1 + val2)), 0x3c00); + VERIFY_HALF_BITS_EQUAL(half(0.5f * (val2 + val3)), 0x3c02); // Conversion from int. - VERIFY_IS_EQUAL(half(-1).x, 0xbc00); - VERIFY_IS_EQUAL(half(0).x, 0x0000); - VERIFY_IS_EQUAL(half(1).x, 0x3c00); - VERIFY_IS_EQUAL(half(2).x, 0x4000); - VERIFY_IS_EQUAL(half(3).x, 0x4200); + VERIFY_HALF_BITS_EQUAL(half(-1), 0xbc00); + VERIFY_HALF_BITS_EQUAL(half(0), 0x0000); + VERIFY_HALF_BITS_EQUAL(half(1), 0x3c00); + VERIFY_HALF_BITS_EQUAL(half(2), 0x4000); + VERIFY_HALF_BITS_EQUAL(half(3), 0x4200); // Conversion from bool. - VERIFY_IS_EQUAL(half(false).x, 0x0000); - VERIFY_IS_EQUAL(half(true).x, 0x3c00); + VERIFY_HALF_BITS_EQUAL(half(false), 0x0000); + VERIFY_HALF_BITS_EQUAL(half(true), 0x3c00); // Conversion to float. - VERIFY_IS_EQUAL(float(half(__half(0x0000))), 0.0f); - VERIFY_IS_EQUAL(float(half(__half(0x3c00))), 1.0f); + VERIFY_IS_EQUAL(float(half(__half_raw(0x0000))), 0.0f); + VERIFY_IS_EQUAL(float(half(__half_raw(0x3c00))), 1.0f); // Denormals. - VERIFY_IS_APPROX(float(half(__half(0x8001))), -5.96046e-08f); - VERIFY_IS_APPROX(float(half(__half(0x0001))), 5.96046e-08f); - VERIFY_IS_APPROX(float(half(__half(0x0002))), 1.19209e-07f); + VERIFY_IS_APPROX(float(half(__half_raw(0x8001))), -5.96046e-08f); + VERIFY_IS_APPROX(float(half(__half_raw(0x0001))), 5.96046e-08f); + VERIFY_IS_APPROX(float(half(__half_raw(0x0002))), 1.19209e-07f); // NaNs and infinities. VERIFY(!(numext::isinf)(float(half(65504.0f)))); // Largest finite number. 
VERIFY(!(numext::isnan)(float(half(0.0f)))); - VERIFY((numext::isinf)(float(half(__half(0xfc00))))); - VERIFY((numext::isnan)(float(half(__half(0xfc01))))); - VERIFY((numext::isinf)(float(half(__half(0x7c00))))); - VERIFY((numext::isnan)(float(half(__half(0x7c01))))); + VERIFY((numext::isinf)(float(half(__half_raw(0xfc00))))); + VERIFY((numext::isnan)(float(half(__half_raw(0xfc01))))); + VERIFY((numext::isinf)(float(half(__half_raw(0x7c00))))); + VERIFY((numext::isnan)(float(half(__half_raw(0x7c01))))); #if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 @@ -79,12 +96,12 @@ void test_conversion() #endif // Exactly same checks as above, just directly on the half representation. - VERIFY(!(numext::isinf)(half(__half(0x7bff)))); - VERIFY(!(numext::isnan)(half(__half(0x0000)))); - VERIFY((numext::isinf)(half(__half(0xfc00)))); - VERIFY((numext::isnan)(half(__half(0xfc01)))); - VERIFY((numext::isinf)(half(__half(0x7c00)))); - VERIFY((numext::isnan)(half(__half(0x7c01)))); + VERIFY(!(numext::isinf)(half(__half_raw(0x7bff)))); + VERIFY(!(numext::isnan)(half(__half_raw(0x0000)))); + VERIFY((numext::isinf)(half(__half_raw(0xfc00)))); + VERIFY((numext::isnan)(half(__half_raw(0xfc01)))); + VERIFY((numext::isinf)(half(__half_raw(0x7c00)))); + VERIFY((numext::isnan)(half(__half_raw(0x7c01)))); #if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 @@ -92,24 +109,50 @@ void test_conversion() VERIFY((numext::isinf)(half(1.0 / 0.0))); VERIFY((numext::isinf)(half(-1.0 / 0.0))); #endif + + // Conversion to bool + VERIFY(!static_cast<bool>(half(0.0))); + VERIFY(!static_cast<bool>(half(-0.0))); + VERIFY(static_cast<bool>(half(__half_raw(0x7bff)))); + VERIFY(static_cast<bool>(half(-0.33333))); + VERIFY(static_cast<bool>(half(1.0))); + VERIFY(static_cast<bool>(half(-1.0))); + VERIFY(static_cast<bool>(half(-5.96046e-08f))); } void test_numtraits() { - std::cout << "epsilon = " << NumTraits<half>::epsilon() << " (0x" << std::hex << NumTraits<half>::epsilon().x 
<< ")" << std::endl; - std::cout << "highest = " << NumTraits<half>::highest() << " (0x" << std::hex << NumTraits<half>::highest().x << ")" << std::endl; - std::cout << "lowest = " << NumTraits<half>::lowest() << " (0x" << std::hex << NumTraits<half>::lowest().x << ")" << std::endl; - std::cout << "min = " << (std::numeric_limits<half>::min)() << " (0x" << std::hex << half((std::numeric_limits<half>::min)()).x << ")" << std::endl; - std::cout << "denorm min = " << (std::numeric_limits<half>::denorm_min)() << " (0x" << std::hex << half((std::numeric_limits<half>::denorm_min)()).x << ")" << std::endl; - std::cout << "infinity = " << NumTraits<half>::infinity() << " (0x" << std::hex << NumTraits<half>::infinity().x << ")" << std::endl; - std::cout << "quiet nan = " << NumTraits<half>::quiet_NaN() << " (0x" << std::hex << NumTraits<half>::quiet_NaN().x << ")" << std::endl; - std::cout << "signaling nan = " << std::numeric_limits<half>::signaling_NaN() << " (0x" << std::hex << std::numeric_limits<half>::signaling_NaN().x << ")" << std::endl; + std::cout << "epsilon = " << NumTraits<half>::epsilon() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<half>::epsilon()) << ")" << std::endl; + std::cout << "highest = " << NumTraits<half>::highest() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<half>::highest()) << ")" << std::endl; + std::cout << "lowest = " << NumTraits<half>::lowest() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<half>::lowest()) << ")" << std::endl; + std::cout << "min = " << (std::numeric_limits<half>::min)() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(half((std::numeric_limits<half>::min)())) << ")" << std::endl; + std::cout << "denorm min = " << (std::numeric_limits<half>::denorm_min)() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(half((std::numeric_limits<half>::denorm_min)())) << ")" << std::endl; + std::cout << "infinity = " << 
NumTraits<half>::infinity() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<half>::infinity()) << ")" << std::endl; + std::cout << "quiet nan = " << NumTraits<half>::quiet_NaN() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(NumTraits<half>::quiet_NaN()) << ")" << std::endl; + std::cout << "signaling nan = " << std::numeric_limits<half>::signaling_NaN() << " (0x" << std::hex << numext::bit_cast<numext::uint16_t>(std::numeric_limits<half>::signaling_NaN()) << ")" << std::endl; VERIFY(NumTraits<half>::IsSigned); - VERIFY_IS_EQUAL( std::numeric_limits<half>::infinity().x, half(std::numeric_limits<float>::infinity()).x ); - VERIFY_IS_EQUAL( std::numeric_limits<half>::quiet_NaN().x, half(std::numeric_limits<float>::quiet_NaN()).x ); - VERIFY_IS_EQUAL( std::numeric_limits<half>::signaling_NaN().x, half(std::numeric_limits<float>::signaling_NaN()).x ); + VERIFY_IS_EQUAL( + numext::bit_cast<numext::uint16_t>(std::numeric_limits<half>::infinity()), + numext::bit_cast<numext::uint16_t>(half(std::numeric_limits<float>::infinity())) ); + // There is no guarantee that casting a 32-bit NaN to 16-bit has a precise + // bit pattern. We test that it is in fact a NaN, then test the signaling + // bit (msb of significand is 1 for quiet, 0 for signaling). + const numext::uint16_t HALF_QUIET_BIT = 0x0200; + VERIFY( + (numext::isnan)(std::numeric_limits<half>::quiet_NaN()) + && (numext::isnan)(half(std::numeric_limits<float>::quiet_NaN())) + && ((numext::bit_cast<numext::uint16_t>(std::numeric_limits<half>::quiet_NaN()) & HALF_QUIET_BIT) > 0) + && ((numext::bit_cast<numext::uint16_t>(half(std::numeric_limits<float>::quiet_NaN())) & HALF_QUIET_BIT) > 0) ); + // After a cast to half, a signaling NaN may become non-signaling + // (e.g. in the case of casting float to native __fp16). Thus, we check that + // both are NaN, and that only the `numeric_limits` version is signaling. 
+ VERIFY( + (numext::isnan)(std::numeric_limits<half>::signaling_NaN()) + && (numext::isnan)(half(std::numeric_limits<float>::signaling_NaN())) + && ((numext::bit_cast<numext::uint16_t>(std::numeric_limits<half>::signaling_NaN()) & HALF_QUIET_BIT) == 0) ); + VERIFY( (std::numeric_limits<half>::min)() > half(0.f) ); VERIFY( (std::numeric_limits<half>::denorm_min)() > half(0.f) ); VERIFY( (std::numeric_limits<half>::min)()/half(2) > half(0.f) ); @@ -125,6 +168,20 @@ void test_arithmetic() VERIFY_IS_APPROX(float(half(1.0f) / half(3.0f)), 0.33333f); VERIFY_IS_EQUAL(float(-half(4096.0f)), -4096.0f); VERIFY_IS_EQUAL(float(-half(-4096.0f)), 4096.0f); + + half x(3); + half y = ++x; + VERIFY_IS_EQUAL(x, half(4)); + VERIFY_IS_EQUAL(y, half(4)); + y = --x; + VERIFY_IS_EQUAL(x, half(3)); + VERIFY_IS_EQUAL(y, half(3)); + y = x++; + VERIFY_IS_EQUAL(x, half(4)); + VERIFY_IS_EQUAL(y, half(3)); + y = x--; + VERIFY_IS_EQUAL(x, half(3)); + VERIFY_IS_EQUAL(y, half(4)); } void test_comparison() @@ -197,6 +254,11 @@ void test_basic_functions() VERIFY_IS_APPROX(float(numext::exp(half(EIGEN_PI))), 20.f + float(EIGEN_PI)); VERIFY_IS_APPROX(float(exp(half(EIGEN_PI))), 20.f + float(EIGEN_PI)); + VERIFY_IS_EQUAL(float(numext::expm1(half(0.0f))), 0.0f); + VERIFY_IS_EQUAL(float(expm1(half(0.0f))), 0.0f); + VERIFY_IS_APPROX(float(numext::expm1(half(2.0f))), 6.3890561f); + VERIFY_IS_APPROX(float(expm1(half(2.0f))), 6.3890561f); + VERIFY_IS_EQUAL(float(numext::log(half(1.0f))), 0.0f); VERIFY_IS_EQUAL(float(log(half(1.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::log(half(10.0f))), 2.30273f); @@ -206,6 +268,11 @@ void test_basic_functions() VERIFY_IS_EQUAL(float(log1p(half(0.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::log1p(half(10.0f))), 2.3978953f); VERIFY_IS_APPROX(float(log1p(half(10.0f))), 2.3978953f); + + VERIFY_IS_APPROX(numext::fmod(half(5.3f), half(2.0f)), half(1.3f)); + VERIFY_IS_APPROX(fmod(half(5.3f), half(2.0f)), half(1.3f)); + VERIFY_IS_APPROX(numext::fmod(half(-18.5f), half(-4.2f)), 
half(-1.7f)); + VERIFY_IS_APPROX(fmod(half(-18.5f), half(-4.2f)), half(-1.7f)); } void test_trigonometric_functions() @@ -213,8 +280,8 @@ void test_trigonometric_functions() VERIFY_IS_APPROX(numext::cos(half(0.0f)), half(cosf(0.0f))); VERIFY_IS_APPROX(cos(half(0.0f)), half(cosf(0.0f))); VERIFY_IS_APPROX(numext::cos(half(EIGEN_PI)), half(cosf(EIGEN_PI))); - //VERIFY_IS_APPROX(numext::cos(half(EIGEN_PI/2)), half(cosf(EIGEN_PI/2))); - //VERIFY_IS_APPROX(numext::cos(half(3*EIGEN_PI/2)), half(cosf(3*EIGEN_PI/2))); + // VERIFY_IS_APPROX(numext::cos(half(EIGEN_PI/2)), half(cosf(EIGEN_PI/2))); + // VERIFY_IS_APPROX(numext::cos(half(3*EIGEN_PI/2)), half(cosf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::cos(half(3.5f)), half(cosf(3.5f))); VERIFY_IS_APPROX(numext::sin(half(0.0f)), half(sinf(0.0f))); @@ -252,13 +319,31 @@ void test_array() ss << a1; } -void test_half_float() +void test_product() +{ + typedef Matrix<half,Dynamic,Dynamic> MatrixXh; + Index rows = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + Index cols = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + MatrixXh Ah = MatrixXh::Random(rows,depth); + MatrixXh Bh = MatrixXh::Random(depth,cols); + MatrixXh Ch = MatrixXh::Random(rows,cols); + MatrixXf Af = Ah.cast<float>(); + MatrixXf Bf = Bh.cast<float>(); + MatrixXf Cf = Ch.cast<float>(); + VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast<half>()); +} + +EIGEN_DECLARE_TEST(half_float) { - CALL_SUBTEST(test_conversion()); CALL_SUBTEST(test_numtraits()); - CALL_SUBTEST(test_arithmetic()); - CALL_SUBTEST(test_comparison()); - CALL_SUBTEST(test_basic_functions()); - CALL_SUBTEST(test_trigonometric_functions()); - CALL_SUBTEST(test_array()); + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST(test_conversion()); + CALL_SUBTEST(test_arithmetic()); + CALL_SUBTEST(test_comparison()); + CALL_SUBTEST(test_basic_functions()); + CALL_SUBTEST(test_trigonometric_functions()); + 
CALL_SUBTEST(test_array()); + CALL_SUBTEST(test_product()); + } } diff --git a/test/hessenberg.cpp b/test/hessenberg.cpp index 96bc19e2e..0e1b0098d 100644 --- a/test/hessenberg.cpp +++ b/test/hessenberg.cpp @@ -49,7 +49,7 @@ template<typename Scalar,int Size> void hessenberg(int size = Size) // TODO: Add tests for packedMatrix() and householderCoefficients() } -void test_hessenberg() +EIGEN_DECLARE_TEST(hessenberg) { CALL_SUBTEST_1(( hessenberg<std::complex<double>,1>() )); CALL_SUBTEST_2(( hessenberg<std::complex<double>,2>() )); diff --git a/test/householder.cpp b/test/householder.cpp index c5f6b5e4f..cad8138a2 100644 --- a/test/householder.cpp +++ b/test/householder.cpp @@ -12,7 +12,6 @@ template<typename MatrixType> void householder(const MatrixType& m) { - typedef typename MatrixType::Index Index; static bool even = true; even = !even; /* this test covers the following files: @@ -49,6 +48,17 @@ template<typename MatrixType> void householder(const MatrixType& m) v1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(v1.norm(), v2.norm()); + // reconstruct householder matrix: + SquareMatrixType id, H1, H2; + id.setIdentity(rows, rows); + H1 = H2 = id; + VectorType vv(rows); + vv << Scalar(1), essential; + H1.applyHouseholderOnTheLeft(essential, beta, tmp); + H2.applyHouseholderOnTheRight(essential, beta, tmp); + VERIFY_IS_APPROX(H1, H2); + VERIFY_IS_APPROX(H1, id - beta * vv*vv.adjoint()); + MatrixType m1(rows, cols), m2(rows, cols); @@ -69,7 +79,7 @@ template<typename MatrixType> void householder(const MatrixType& m) m3.rowwise() = v1.transpose(); m4 = m3; m3.row(0).makeHouseholder(essential, beta, alpha); - m3.applyHouseholderOnTheRight(essential,beta,tmp); + m3.applyHouseholderOnTheRight(essential.conjugate(),beta,tmp); VERIFY_IS_APPROX(m3.norm(), m4.norm()); if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(m3.block(0,1,rows,rows-1).norm(), m3.norm()); VERIFY_IS_MUCH_SMALLER_THAN(numext::imag(m3(0,0)), numext::real(m3(0,0))); @@ -104,14 +114,14 @@ 
template<typename MatrixType> void householder(const MatrixType& m) VERIFY_IS_APPROX(hseq_mat.adjoint(), hseq_mat_adj); VERIFY_IS_APPROX(hseq_mat.conjugate(), hseq_mat_conj); VERIFY_IS_APPROX(hseq_mat.transpose(), hseq_mat_trans); - VERIFY_IS_APPROX(hseq_mat * m6, hseq_mat * m6); - VERIFY_IS_APPROX(hseq_mat.adjoint() * m6, hseq_mat_adj * m6); - VERIFY_IS_APPROX(hseq_mat.conjugate() * m6, hseq_mat_conj * m6); - VERIFY_IS_APPROX(hseq_mat.transpose() * m6, hseq_mat_trans * m6); - VERIFY_IS_APPROX(m6 * hseq_mat, m6 * hseq_mat); - VERIFY_IS_APPROX(m6 * hseq_mat.adjoint(), m6 * hseq_mat_adj); - VERIFY_IS_APPROX(m6 * hseq_mat.conjugate(), m6 * hseq_mat_conj); - VERIFY_IS_APPROX(m6 * hseq_mat.transpose(), m6 * hseq_mat_trans); + VERIFY_IS_APPROX(hseq * m6, hseq_mat * m6); + VERIFY_IS_APPROX(hseq.adjoint() * m6, hseq_mat_adj * m6); + VERIFY_IS_APPROX(hseq.conjugate() * m6, hseq_mat_conj * m6); + VERIFY_IS_APPROX(hseq.transpose() * m6, hseq_mat_trans * m6); + VERIFY_IS_APPROX(m6 * hseq, m6 * hseq_mat); + VERIFY_IS_APPROX(m6 * hseq.adjoint(), m6 * hseq_mat_adj); + VERIFY_IS_APPROX(m6 * hseq.conjugate(), m6 * hseq_mat_conj); + VERIFY_IS_APPROX(m6 * hseq.transpose(), m6 * hseq_mat_trans); // test householder sequence on the right with a shift @@ -123,7 +133,7 @@ template<typename MatrixType> void householder(const MatrixType& m) VERIFY_IS_APPROX(m3 * m5, m1); // test evaluating rhseq to a dense matrix, then applying } -void test_householder() +EIGEN_DECLARE_TEST(householder) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( householder(Matrix<double,2,2>()) ); diff --git a/test/incomplete_cholesky.cpp b/test/incomplete_cholesky.cpp index 59ffe9259..ecc17f5c3 100644 --- a/test/incomplete_cholesky.cpp +++ b/test/incomplete_cholesky.cpp @@ -12,14 +12,14 @@ #include <Eigen/IterativeLinearSolvers> #include <unsupported/Eigen/IterativeSolvers> -template<typename T, typename I> void test_incomplete_cholesky_T() +template<typename T, typename I_> void test_incomplete_cholesky_T() { 
- typedef SparseMatrix<T,0,I> SparseMatrixType; - ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, AMDOrdering<I> > > cg_illt_lower_amd; - ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, NaturalOrdering<I> > > cg_illt_lower_nat; - ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, AMDOrdering<I> > > cg_illt_upper_amd; - ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, NaturalOrdering<I> > > cg_illt_upper_nat; - ConjugateGradient<SparseMatrixType, Upper|Lower, IncompleteCholesky<T, Lower, AMDOrdering<I> > > cg_illt_uplo_amd; + typedef SparseMatrix<T,0,I_> SparseMatrixType; + ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, AMDOrdering<I_> > > cg_illt_lower_amd; + ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, NaturalOrdering<I_> > > cg_illt_lower_nat; + ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, AMDOrdering<I_> > > cg_illt_upper_amd; + ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, NaturalOrdering<I_> > > cg_illt_upper_nat; + ConjugateGradient<SparseMatrixType, Upper|Lower, IncompleteCholesky<T, Lower, AMDOrdering<I_> > > cg_illt_uplo_amd; CALL_SUBTEST( check_sparse_spd_solving(cg_illt_lower_amd) ); @@ -29,14 +29,10 @@ template<typename T, typename I> void test_incomplete_cholesky_T() CALL_SUBTEST( check_sparse_spd_solving(cg_illt_uplo_amd) ); } -void test_incomplete_cholesky() +template<int> +void bug1150() { - CALL_SUBTEST_1(( test_incomplete_cholesky_T<double,int>() )); - CALL_SUBTEST_2(( test_incomplete_cholesky_T<std::complex<double>, int>() )); - CALL_SUBTEST_3(( test_incomplete_cholesky_T<double,long int>() )); - -#ifdef EIGEN_TEST_PART_1 - // regression for bug 1150 + // regression for bug 1150 for(int N = 1; N<20; ++N) { Eigen::MatrixXd b( N, N ); @@ -61,5 +57,13 @@ void test_incomplete_cholesky() VERIFY(solver.preconditioner().info() == Eigen::Success); 
VERIFY(solver.info() == Eigen::Success); } -#endif +} + +EIGEN_DECLARE_TEST(incomplete_cholesky) +{ + CALL_SUBTEST_1(( test_incomplete_cholesky_T<double,int>() )); + CALL_SUBTEST_2(( test_incomplete_cholesky_T<std::complex<double>, int>() )); + CALL_SUBTEST_3(( test_incomplete_cholesky_T<double,long int>() )); + + CALL_SUBTEST_1(( bug1150<0>() )); } diff --git a/test/indexed_view.cpp b/test/indexed_view.cpp new file mode 100644 index 000000000..72c54af68 --- /dev/null +++ b/test/indexed_view.cpp @@ -0,0 +1,473 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifdef EIGEN_TEST_PART_2 +// Make sure we also check c++11 max implementation +#define EIGEN_MAX_CPP_VER 11 +#endif + +#ifdef EIGEN_TEST_PART_3 +// Make sure we also check c++98 max implementation +#define EIGEN_MAX_CPP_VER 03 + +// We need to disable this warning when compiling with c++11 while limiting Eigen to c++98 +// Ideally we would rather configure the compiler to build in c++98 mode but this needs +// to be done at the CMakeLists.txt level. 
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) + #pragma GCC diagnostic ignored "-Wdeprecated" +#endif + +#if defined(__GNUC__) && (__GNUC__ >=9) + #pragma GCC diagnostic ignored "-Wdeprecated-copy" +#endif +#if defined(__clang__) && (__clang_major__ >= 10) + #pragma clang diagnostic ignored "-Wdeprecated-copy" +#endif + +#endif + +#include <valarray> +#include <vector> +#include "main.h" + +#if EIGEN_HAS_CXX11 +#include <array> +#endif + +typedef std::pair<Index,Index> IndexPair; + +int encode(Index i, Index j) { + return int(i*100 + j); +} + +IndexPair decode(Index ij) { + return IndexPair(ij / 100, ij % 100); +} + +template<typename T> +bool match(const T& xpr, std::string ref, std::string str_xpr = "") { + EIGEN_UNUSED_VARIABLE(str_xpr); + std::stringstream str; + str << xpr; + if(!(str.str() == ref)) + std::cout << str_xpr << "\n" << xpr << "\n\n"; + return str.str() == ref; +} + +#define MATCH(X,R) match(X, R, #X) + +template<typename T1,typename T2> +typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type +is_same_eq(const T1& a, const T2& b) +{ + return (a == b).all(); +} + +template<typename T1,typename T2> +bool is_same_seq(const T1& a, const T2& b) +{ + bool ok = a.first()==b.first() && a.size() == b.size() && Index(a.incrObject())==Index(b.incrObject());; + if(!ok) + { + std::cerr << "seqN(" << a.first() << ", " << a.size() << ", " << Index(a.incrObject()) << ") != "; + std::cerr << "seqN(" << b.first() << ", " << b.size() << ", " << Index(b.incrObject()) << ")\n"; + } + return ok; +} + +template<typename T1,typename T2> +typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type +is_same_seq_type(const T1& a, const T2& b) +{ + return is_same_seq(a,b); +} + + + +#define VERIFY_EQ_INT(A,B) VERIFY_IS_APPROX(int(A),int(B)) + +// C++03 does not allow local or unnamed enums as index +enum DummyEnum { XX=0, YY=1 }; + +void check_indexed_view() +{ + Index n = 10; + + ArrayXd a = 
ArrayXd::LinSpaced(n,0,n-1); + Array<double,1,Dynamic> b = a.transpose(); + + #if EIGEN_COMP_CXXVER>=14 + ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ref(encode)); + #else + ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ptr_fun(&encode)); + #endif + + for(Index i=0; i<n; ++i) + for(Index j=0; j<n; ++j) + VERIFY( decode(A(i,j)) == IndexPair(i,j) ); + + Array4i eii(4); eii << 3, 1, 6, 5; + std::valarray<int> vali(4); Map<ArrayXi>(&vali[0],4) = eii; + std::vector<int> veci(4); Map<ArrayXi>(veci.data(),4) = eii; + + VERIFY( MATCH( A(3, seq(9,3,-1)), + "309 308 307 306 305 304 303") + ); + + VERIFY( MATCH( A(seqN(2,5), seq(9,3,-1)), + "209 208 207 206 205 204 203\n" + "309 308 307 306 305 304 303\n" + "409 408 407 406 405 404 403\n" + "509 508 507 506 505 504 503\n" + "609 608 607 606 605 604 603") + ); + + VERIFY( MATCH( A(seqN(2,5), 5), + "205\n" + "305\n" + "405\n" + "505\n" + "605") + ); + + VERIFY( MATCH( A(seqN(last,5,-1), seq(2,last)), + "902 903 904 905 906 907 908 909\n" + "802 803 804 805 806 807 808 809\n" + "702 703 704 705 706 707 708 709\n" + "602 603 604 605 606 607 608 609\n" + "502 503 504 505 506 507 508 509") + ); + + VERIFY( MATCH( A(eii, veci), + "303 301 306 305\n" + "103 101 106 105\n" + "603 601 606 605\n" + "503 501 506 505") + ); + + VERIFY( MATCH( A(eii, all), + "300 301 302 303 304 305 306 307 308 309\n" + "100 101 102 103 104 105 106 107 108 109\n" + "600 601 602 603 604 605 606 607 608 609\n" + "500 501 502 503 504 505 506 507 508 509") + ); + + // take row number 3, and repeat it 5 times + VERIFY( MATCH( A(seqN(3,5,0), all), + "300 301 302 303 304 305 306 307 308 309\n" + "300 301 302 303 304 305 306 307 308 309\n" + "300 301 302 303 304 305 306 307 308 309\n" + "300 301 302 303 304 305 306 307 308 309\n" + "300 301 302 303 304 305 306 307 308 309") + ); + + VERIFY( MATCH( a(seqN(3,3),0), "3\n4\n5" ) ); + VERIFY( MATCH( a(seq(3,5)), "3\n4\n5" ) ); + VERIFY( MATCH( a(seqN(3,3,1)), "3\n4\n5" ) ); + VERIFY( MATCH( a(seqN(5,3,-1)), "5\n4\n3" ) 
); + + VERIFY( MATCH( b(0,seqN(3,3)), "3 4 5" ) ); + VERIFY( MATCH( b(seq(3,5)), "3 4 5" ) ); + VERIFY( MATCH( b(seqN(3,3,1)), "3 4 5" ) ); + VERIFY( MATCH( b(seqN(5,3,-1)), "5 4 3" ) ); + + VERIFY( MATCH( b(all), "0 1 2 3 4 5 6 7 8 9" ) ); + VERIFY( MATCH( b(eii), "3 1 6 5" ) ); + + Array44i B; + B.setRandom(); + VERIFY( (A(seqN(2,5), 5)).ColsAtCompileTime == 1); + VERIFY( (A(seqN(2,5), 5)).RowsAtCompileTime == Dynamic); + VERIFY_EQ_INT( (A(seqN(2,5), 5)).InnerStrideAtCompileTime , A.InnerStrideAtCompileTime); + VERIFY_EQ_INT( (A(seqN(2,5), 5)).OuterStrideAtCompileTime , A.col(5).OuterStrideAtCompileTime); + + VERIFY_EQ_INT( (A(5,seqN(2,5))).InnerStrideAtCompileTime , A.row(5).InnerStrideAtCompileTime); + VERIFY_EQ_INT( (A(5,seqN(2,5))).OuterStrideAtCompileTime , A.row(5).OuterStrideAtCompileTime); + VERIFY_EQ_INT( (B(1,seqN(1,2))).InnerStrideAtCompileTime , B.row(1).InnerStrideAtCompileTime); + VERIFY_EQ_INT( (B(1,seqN(1,2))).OuterStrideAtCompileTime , B.row(1).OuterStrideAtCompileTime); + + VERIFY_EQ_INT( (A(seqN(2,5), seq(1,3))).InnerStrideAtCompileTime , A.InnerStrideAtCompileTime); + VERIFY_EQ_INT( (A(seqN(2,5), seq(1,3))).OuterStrideAtCompileTime , A.OuterStrideAtCompileTime); + VERIFY_EQ_INT( (B(seqN(1,2), seq(1,3))).InnerStrideAtCompileTime , B.InnerStrideAtCompileTime); + VERIFY_EQ_INT( (B(seqN(1,2), seq(1,3))).OuterStrideAtCompileTime , B.OuterStrideAtCompileTime); + VERIFY_EQ_INT( (A(seqN(2,5,2), seq(1,3,2))).InnerStrideAtCompileTime , Dynamic); + VERIFY_EQ_INT( (A(seqN(2,5,2), seq(1,3,2))).OuterStrideAtCompileTime , Dynamic); + VERIFY_EQ_INT( (A(seqN(2,5,fix<2>), seq(1,3,fix<3>))).InnerStrideAtCompileTime , 2); + VERIFY_EQ_INT( (A(seqN(2,5,fix<2>), seq(1,3,fix<3>))).OuterStrideAtCompileTime , Dynamic); + VERIFY_EQ_INT( (B(seqN(1,2,fix<2>), seq(1,3,fix<3>))).InnerStrideAtCompileTime , 2); + VERIFY_EQ_INT( (B(seqN(1,2,fix<2>), seq(1,3,fix<3>))).OuterStrideAtCompileTime , 3*4); + + VERIFY_EQ_INT( (A(seqN(2,fix<5>), seqN(1,fix<3>))).RowsAtCompileTime, 5); 
+ VERIFY_EQ_INT( (A(seqN(2,fix<5>), seqN(1,fix<3>))).ColsAtCompileTime, 3); + VERIFY_EQ_INT( (A(seqN(2,fix<5>(5)), seqN(1,fix<3>(3)))).RowsAtCompileTime, 5); + VERIFY_EQ_INT( (A(seqN(2,fix<5>(5)), seqN(1,fix<3>(3)))).ColsAtCompileTime, 3); + VERIFY_EQ_INT( (A(seqN(2,fix<Dynamic>(5)), seqN(1,fix<Dynamic>(3)))).RowsAtCompileTime, Dynamic); + VERIFY_EQ_INT( (A(seqN(2,fix<Dynamic>(5)), seqN(1,fix<Dynamic>(3)))).ColsAtCompileTime, Dynamic); + VERIFY_EQ_INT( (A(seqN(2,fix<Dynamic>(5)), seqN(1,fix<Dynamic>(3)))).rows(), 5); + VERIFY_EQ_INT( (A(seqN(2,fix<Dynamic>(5)), seqN(1,fix<Dynamic>(3)))).cols(), 3); + + VERIFY( is_same_seq_type( seqN(2,5,fix<-1>), seqN(2,5,fix<-1>(-1)) ) ); + VERIFY( is_same_seq_type( seqN(2,5), seqN(2,5,fix<1>(1)) ) ); + VERIFY( is_same_seq_type( seqN(2,5,3), seqN(2,5,fix<DynamicIndex>(3)) ) ); + VERIFY( is_same_seq_type( seq(2,7,fix<3>), seqN(2,2,fix<3>) ) ); + VERIFY( is_same_seq_type( seqN(2,fix<Dynamic>(5),3), seqN(2,5,fix<DynamicIndex>(3)) ) ); + VERIFY( is_same_seq_type( seqN(2,fix<5>(5),fix<-2>), seqN(2,fix<5>,fix<-2>()) ) ); + + VERIFY( is_same_seq_type( seq(2,fix<5>), seqN(2,4) ) ); +#if EIGEN_HAS_CXX11 + VERIFY( is_same_seq_type( seq(fix<2>,fix<5>), seqN(fix<2>,fix<4>) ) ); + VERIFY( is_same_seq( seqN(2,std::integral_constant<int,5>(),std::integral_constant<int,-2>()), seqN(2,fix<5>,fix<-2>()) ) ); + VERIFY( is_same_seq( seq(std::integral_constant<int,1>(),std::integral_constant<int,5>(),std::integral_constant<int,2>()), + seq(fix<1>,fix<5>,fix<2>()) ) ); + VERIFY( is_same_seq_type( seqN(2,std::integral_constant<int,5>(),std::integral_constant<int,-2>()), seqN(2,fix<5>,fix<-2>()) ) ); + VERIFY( is_same_seq_type( seq(std::integral_constant<int,1>(),std::integral_constant<int,5>(),std::integral_constant<int,2>()), + seq(fix<1>,fix<5>,fix<2>()) ) ); + + VERIFY( is_same_seq_type( seqN(2,std::integral_constant<int,5>()), seqN(2,fix<5>) ) ); + VERIFY( is_same_seq_type( seq(std::integral_constant<int,1>(),std::integral_constant<int,5>()), 
seq(fix<1>,fix<5>) ) ); +#else + // sorry, no compile-time size recovery in c++98/03 + VERIFY( is_same_seq( seq(fix<2>,fix<5>), seqN(fix<2>,fix<4>) ) ); +#endif + + VERIFY( (A(seqN(2,fix<5>), 5)).RowsAtCompileTime == 5); + VERIFY( (A(4, all)).ColsAtCompileTime == Dynamic); + VERIFY( (A(4, all)).RowsAtCompileTime == 1); + VERIFY( (B(1, all)).ColsAtCompileTime == 4); + VERIFY( (B(1, all)).RowsAtCompileTime == 1); + VERIFY( (B(all,1)).ColsAtCompileTime == 1); + VERIFY( (B(all,1)).RowsAtCompileTime == 4); + + VERIFY(int( (A(all, eii)).ColsAtCompileTime) == int(eii.SizeAtCompileTime)); + VERIFY_EQ_INT( (A(eii, eii)).Flags&DirectAccessBit, (unsigned int)(0)); + VERIFY_EQ_INT( (A(eii, eii)).InnerStrideAtCompileTime, 0); + VERIFY_EQ_INT( (A(eii, eii)).OuterStrideAtCompileTime, 0); + + VERIFY_IS_APPROX( A(seq(n-1,2,-2), seqN(n-1-6,3,-1)), A(seq(last,2,fix<-2>), seqN(last-6,3,fix<-1>)) ); + + VERIFY_IS_APPROX( A(seq(n-1,2,-2), seqN(n-1-6,4)), A(seq(last,2,-2), seqN(last-6,4)) ); + VERIFY_IS_APPROX( A(seq(n-1-6,n-1-2), seqN(n-1-6,4)), A(seq(last-6,last-2), seqN(6+last-6-6,4)) ); + VERIFY_IS_APPROX( A(seq((n-1)/2,(n)/2+3), seqN(2,4)), A(seq(last/2,(last+1)/2+3), seqN(last+2-last,4)) ); + VERIFY_IS_APPROX( A(seq(n-2,2,-2), seqN(n-8,4)), A(seq(lastp1-2,2,-2), seqN(lastp1-8,4)) ); + + // Check all combinations of seq: + VERIFY_IS_APPROX( A(seq(1,n-1-2,2), seq(1,n-1-2,2)), A(seq(1,last-2,2), seq(1,last-2,fix<2>)) ); + VERIFY_IS_APPROX( A(seq(n-1-5,n-1-2,2), seq(n-1-5,n-1-2,2)), A(seq(last-5,last-2,2), seq(last-5,last-2,fix<2>)) ); + VERIFY_IS_APPROX( A(seq(n-1-5,7,2), seq(n-1-5,7,2)), A(seq(last-5,7,2), seq(last-5,7,fix<2>)) ); + VERIFY_IS_APPROX( A(seq(1,n-1-2), seq(n-1-5,7)), A(seq(1,last-2), seq(last-5,7)) ); + VERIFY_IS_APPROX( A(seq(n-1-5,n-1-2), seq(n-1-5,n-1-2)), A(seq(last-5,last-2), seq(last-5,last-2)) ); + + VERIFY_IS_APPROX( A.col(A.cols()-1), A(all,last) ); + VERIFY_IS_APPROX( A(A.rows()-2, A.cols()/2), A(last-1, lastp1/2) ); + VERIFY_IS_APPROX( a(a.size()-2), 
a(last-1) ); + VERIFY_IS_APPROX( a(a.size()/2), a((last+1)/2) ); + + // Check fall-back to Block + { + VERIFY( is_same_eq(A.col(0), A(all,0)) ); + VERIFY( is_same_eq(A.row(0), A(0,all)) ); + VERIFY( is_same_eq(A.block(0,0,2,2), A(seqN(0,2),seq(0,1))) ); + VERIFY( is_same_eq(A.middleRows(2,4), A(seqN(2,4),all)) ); + VERIFY( is_same_eq(A.middleCols(2,4), A(all,seqN(2,4))) ); + + VERIFY( is_same_eq(A.col(A.cols()-1), A(all,last)) ); + + const ArrayXXi& cA(A); + VERIFY( is_same_eq(cA.col(0), cA(all,0)) ); + VERIFY( is_same_eq(cA.row(0), cA(0,all)) ); + VERIFY( is_same_eq(cA.block(0,0,2,2), cA(seqN(0,2),seq(0,1))) ); + VERIFY( is_same_eq(cA.middleRows(2,4), cA(seqN(2,4),all)) ); + VERIFY( is_same_eq(cA.middleCols(2,4), cA(all,seqN(2,4))) ); + + VERIFY( is_same_eq(a.head(4), a(seq(0,3))) ); + VERIFY( is_same_eq(a.tail(4), a(seqN(last-3,4))) ); + VERIFY( is_same_eq(a.tail(4), a(seq(lastp1-4,last))) ); + VERIFY( is_same_eq(a.segment<4>(3), a(seqN(3,fix<4>))) ); + } + + ArrayXXi A1=A, A2 = ArrayXXi::Random(4,4); + ArrayXi range25(4); range25 << 3,2,4,5; + A1(seqN(3,4),seq(2,5)) = A2; + VERIFY_IS_APPROX( A1.block(3,2,4,4), A2 ); + A1 = A; + A2.setOnes(); + A1(seq(6,3,-1),range25) = A2; + VERIFY_IS_APPROX( A1.block(3,2,4,4), A2 ); + + // check reverse + { + VERIFY( is_same_seq_type( seq(3,7).reverse(), seqN(7,5,fix<-1>) ) ); + VERIFY( is_same_seq_type( seq(7,3,fix<-2>).reverse(), seqN(3,3,fix<2>) ) ); + VERIFY_IS_APPROX( a(seqN(2,last/2).reverse()), a(seqN(2+(last/2-1)*1,last/2,fix<-1>)) ); + VERIFY_IS_APPROX( a(seqN(last/2,fix<4>).reverse()),a(seqN(last/2,fix<4>)).reverse() ); + VERIFY_IS_APPROX( A(seq(last-5,last-1,2).reverse(), seqN(last-3,3,fix<-2>).reverse()), + A(seq(last-5,last-1,2), seqN(last-3,3,fix<-2>)).reverse() ); + } + +#if EIGEN_HAS_CXX11 + // check lastN + VERIFY_IS_APPROX( a(lastN(3)), a.tail(3) ); + VERIFY( MATCH( a(lastN(3)), "7\n8\n9" ) ); + VERIFY_IS_APPROX( a(lastN(fix<3>())), a.tail<3>() ); + VERIFY( MATCH( a(lastN(3,2)), "5\n7\n9" ) ); + VERIFY( MATCH( 
a(lastN(3,fix<2>())), "5\n7\n9" ) ); + VERIFY( a(lastN(fix<3>())).SizeAtCompileTime == 3 ); + + VERIFY( (A(all, std::array<int,4>{{1,3,2,4}})).ColsAtCompileTime == 4); + + VERIFY_IS_APPROX( (A(std::array<int,3>{{1,3,5}}, std::array<int,4>{{9,6,3,0}})), A(seqN(1,3,2), seqN(9,4,-3)) ); + +#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE + VERIFY_IS_APPROX( A({3, 1, 6, 5}, all), A(std::array<int,4>{{3, 1, 6, 5}}, all) ); + VERIFY_IS_APPROX( A(all,{3, 1, 6, 5}), A(all,std::array<int,4>{{3, 1, 6, 5}}) ); + VERIFY_IS_APPROX( A({1,3,5},{3, 1, 6, 5}), A(std::array<int,3>{{1,3,5}},std::array<int,4>{{3, 1, 6, 5}}) ); + + VERIFY_IS_EQUAL( A({1,3,5},{3, 1, 6, 5}).RowsAtCompileTime, 3 ); + VERIFY_IS_EQUAL( A({1,3,5},{3, 1, 6, 5}).ColsAtCompileTime, 4 ); + + VERIFY_IS_APPROX( a({3, 1, 6, 5}), a(std::array<int,4>{{3, 1, 6, 5}}) ); + VERIFY_IS_EQUAL( a({1,3,5}).SizeAtCompileTime, 3 ); + + VERIFY_IS_APPROX( b({3, 1, 6, 5}), b(std::array<int,4>{{3, 1, 6, 5}}) ); + VERIFY_IS_EQUAL( b({1,3,5}).SizeAtCompileTime, 3 ); +#endif + +#endif + + // check mat(i,j) with weird types for i and j + { + VERIFY_IS_APPROX( A(B.RowsAtCompileTime-1, 1), A(3,1) ); + VERIFY_IS_APPROX( A(B.RowsAtCompileTime, 1), A(4,1) ); + VERIFY_IS_APPROX( A(B.RowsAtCompileTime-1, B.ColsAtCompileTime-1), A(3,3) ); + VERIFY_IS_APPROX( A(B.RowsAtCompileTime, B.ColsAtCompileTime), A(4,4) ); + const Index I_ = 3, J_ = 4; + VERIFY_IS_APPROX( A(I_,J_), A(3,4) ); + } + + // check extended block API + { + VERIFY( is_same_eq( A.block<3,4>(1,1), A.block(1,1,fix<3>,fix<4>)) ); + VERIFY( is_same_eq( A.block<3,4>(1,1,3,4), A.block(1,1,fix<3>(),fix<4>(4))) ); + VERIFY( is_same_eq( A.block<3,Dynamic>(1,1,3,4), A.block(1,1,fix<3>,4)) ); + VERIFY( is_same_eq( A.block<Dynamic,4>(1,1,3,4), A.block(1,1,fix<Dynamic>(3),fix<4>)) ); + VERIFY( is_same_eq( A.block(1,1,3,4), A.block(1,1,fix<Dynamic>(3),fix<Dynamic>(4))) ); + + VERIFY( is_same_eq( A.topLeftCorner<3,4>(), A.topLeftCorner(fix<3>,fix<4>)) ); + VERIFY( is_same_eq( A.bottomLeftCorner<3,4>(), 
A.bottomLeftCorner(fix<3>,fix<4>)) ); + VERIFY( is_same_eq( A.bottomRightCorner<3,4>(), A.bottomRightCorner(fix<3>,fix<4>)) ); + VERIFY( is_same_eq( A.topRightCorner<3,4>(), A.topRightCorner(fix<3>,fix<4>)) ); + + VERIFY( is_same_eq( A.leftCols<3>(), A.leftCols(fix<3>)) ); + VERIFY( is_same_eq( A.rightCols<3>(), A.rightCols(fix<3>)) ); + VERIFY( is_same_eq( A.middleCols<3>(1), A.middleCols(1,fix<3>)) ); + + VERIFY( is_same_eq( A.topRows<3>(), A.topRows(fix<3>)) ); + VERIFY( is_same_eq( A.bottomRows<3>(), A.bottomRows(fix<3>)) ); + VERIFY( is_same_eq( A.middleRows<3>(1), A.middleRows(1,fix<3>)) ); + + VERIFY( is_same_eq( a.segment<3>(1), a.segment(1,fix<3>)) ); + VERIFY( is_same_eq( a.head<3>(), a.head(fix<3>)) ); + VERIFY( is_same_eq( a.tail<3>(), a.tail(fix<3>)) ); + + const ArrayXXi& cA(A); + VERIFY( is_same_eq( cA.block<Dynamic,4>(1,1,3,4), cA.block(1,1,fix<Dynamic>(3),fix<4>)) ); + + VERIFY( is_same_eq( cA.topLeftCorner<3,4>(), cA.topLeftCorner(fix<3>,fix<4>)) ); + VERIFY( is_same_eq( cA.bottomLeftCorner<3,4>(), cA.bottomLeftCorner(fix<3>,fix<4>)) ); + VERIFY( is_same_eq( cA.bottomRightCorner<3,4>(), cA.bottomRightCorner(fix<3>,fix<4>)) ); + VERIFY( is_same_eq( cA.topRightCorner<3,4>(), cA.topRightCorner(fix<3>,fix<4>)) ); + + VERIFY( is_same_eq( cA.leftCols<3>(), cA.leftCols(fix<3>)) ); + VERIFY( is_same_eq( cA.rightCols<3>(), cA.rightCols(fix<3>)) ); + VERIFY( is_same_eq( cA.middleCols<3>(1), cA.middleCols(1,fix<3>)) ); + + VERIFY( is_same_eq( cA.topRows<3>(), cA.topRows(fix<3>)) ); + VERIFY( is_same_eq( cA.bottomRows<3>(), cA.bottomRows(fix<3>)) ); + VERIFY( is_same_eq( cA.middleRows<3>(1), cA.middleRows(1,fix<3>)) ); + } + + // Check compilation of enums as index type: + a(XX) = 1; + A(XX,YY) = 1; + // Anonymous enums only work with C++11 +#if EIGEN_HAS_CXX11 + enum { X=0, Y=1 }; + a(X) = 1; + A(X,Y) = 1; + A(XX,Y) = 1; + A(X,YY) = 1; +#endif + + // Check compilation of varying integer types as index types: + Index i = n/2; + short i_short(i); + std::size_t 
i_sizet(i); + VERIFY_IS_EQUAL( a(i), a.coeff(i_short) ); + VERIFY_IS_EQUAL( a(i), a.coeff(i_sizet) ); + + VERIFY_IS_EQUAL( A(i,i), A.coeff(i_short, i_short) ); + VERIFY_IS_EQUAL( A(i,i), A.coeff(i_short, i) ); + VERIFY_IS_EQUAL( A(i,i), A.coeff(i, i_short) ); + VERIFY_IS_EQUAL( A(i,i), A.coeff(i, i_sizet) ); + VERIFY_IS_EQUAL( A(i,i), A.coeff(i_sizet, i) ); + VERIFY_IS_EQUAL( A(i,i), A.coeff(i_sizet, i_short) ); + VERIFY_IS_EQUAL( A(i,i), A.coeff(5, i_sizet) ); + + // Regression test for Max{Rows,Cols}AtCompileTime + { + Matrix3i A3 = Matrix3i::Random(); + ArrayXi ind(5); ind << 1,1,1,1,1; + VERIFY_IS_EQUAL( A3(ind,ind).eval(), MatrixXi::Constant(5,5,A3(1,1)) ); + } + + // Regression for bug 1736 + { + VERIFY_IS_APPROX(A(all, eii).col(0).eval(), A.col(eii(0))); + A(all, eii).col(0) = A.col(eii(0)); + } + + // bug 1815: IndexedView should allow linear access + { + VERIFY( MATCH( b(eii)(0), "3" ) ); + VERIFY( MATCH( a(eii)(0), "3" ) ); + VERIFY( MATCH( A(1,eii)(0), "103")); + VERIFY( MATCH( A(eii,1)(0), "301")); + VERIFY( MATCH( A(1,all)(1), "101")); + VERIFY( MATCH( A(all,1)(1), "101")); + } + +#if EIGEN_HAS_CXX11 + //Bug IndexView with a single static row should be RowMajor: + { + // A(1, seq(0,2,1)).cwiseAbs().colwise().replicate(2).eval(); + STATIC_CHECK(( (internal::evaluator<decltype( A(1,seq(0,2,1)) )>::Flags & RowMajorBit) == RowMajorBit )); + } +#endif + +} + +EIGEN_DECLARE_TEST(indexed_view) +{ +// for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( check_indexed_view() ); + CALL_SUBTEST_2( check_indexed_view() ); + CALL_SUBTEST_3( check_indexed_view() ); +// } + + // static checks of some internals: + STATIC_CHECK(( internal::is_valid_index_type<int>::value )); + STATIC_CHECK(( internal::is_valid_index_type<unsigned int>::value )); + STATIC_CHECK(( internal::is_valid_index_type<short>::value )); + STATIC_CHECK(( internal::is_valid_index_type<std::ptrdiff_t>::value )); + STATIC_CHECK(( internal::is_valid_index_type<std::size_t>::value )); + STATIC_CHECK(( 
!internal::valid_indexed_view_overload<int,int>::value )); + STATIC_CHECK(( !internal::valid_indexed_view_overload<int,std::ptrdiff_t>::value )); + STATIC_CHECK(( !internal::valid_indexed_view_overload<std::ptrdiff_t,int>::value )); + STATIC_CHECK(( !internal::valid_indexed_view_overload<std::size_t,int>::value )); +} diff --git a/test/initializer_list_construction.cpp b/test/initializer_list_construction.cpp new file mode 100644 index 000000000..7a9c49e8d --- /dev/null +++ b/test/initializer_list_construction.cpp @@ -0,0 +1,385 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 David Tellenbach <david.tellenbach@tellnotes.org> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_NO_STATIC_ASSERT + +#include "main.h" + +template<typename Scalar, bool is_integer = NumTraits<Scalar>::IsInteger> +struct TestMethodDispatching { + static void run() {} +}; + +template<typename Scalar> +struct TestMethodDispatching<Scalar, 1> { + static void run() + { + { + Matrix<Scalar, Dynamic, Dynamic> m {3, 4}; + Array<Scalar, Dynamic, Dynamic> a {3, 4}; + VERIFY(m.rows() == 3); + VERIFY(m.cols() == 4); + VERIFY(a.rows() == 3); + VERIFY(a.cols() == 4); + } + { + Matrix<Scalar, 1, 2> m {3, 4}; + Array<Scalar, 1, 2> a {3, 4}; + VERIFY(m(0) == 3); + VERIFY(m(1) == 4); + VERIFY(a(0) == 3); + VERIFY(a(1) == 4); + } + { + Matrix<Scalar, 2, 1> m {3, 4}; + Array<Scalar, 2, 1> a {3, 4}; + VERIFY(m(0) == 3); + VERIFY(m(1) == 4); + VERIFY(a(0) == 3); + VERIFY(a(1) == 4); + } + } +}; + +template<typename Vec4, typename Vec5> void fixedsizeVariadicVectorConstruction2() +{ + { + Vec4 ref = Vec4::Random(); + Vec4 v{ ref[0], ref[1], ref[2], ref[3] }; + VERIFY_IS_APPROX(v, ref); + VERIFY_IS_APPROX(v, (Vec4( ref[0], ref[1], ref[2], ref[3] ))); + 
VERIFY_IS_APPROX(v, (Vec4({ref[0], ref[1], ref[2], ref[3]}))); + + Vec4 v2 = { ref[0], ref[1], ref[2], ref[3] }; + VERIFY_IS_APPROX(v2, ref); + } + { + Vec5 ref = Vec5::Random(); + Vec5 v{ ref[0], ref[1], ref[2], ref[3], ref[4] }; + VERIFY_IS_APPROX(v, ref); + VERIFY_IS_APPROX(v, (Vec5( ref[0], ref[1], ref[2], ref[3], ref[4] ))); + VERIFY_IS_APPROX(v, (Vec5({ref[0], ref[1], ref[2], ref[3], ref[4]}))); + + Vec5 v2 = { ref[0], ref[1], ref[2], ref[3], ref[4] }; + VERIFY_IS_APPROX(v2, ref); + } +} + +#define CHECK_MIXSCALAR_V5_APPROX(V, A0, A1, A2, A3, A4) { \ + VERIFY_IS_APPROX(V[0], Scalar(A0) ); \ + VERIFY_IS_APPROX(V[1], Scalar(A1) ); \ + VERIFY_IS_APPROX(V[2], Scalar(A2) ); \ + VERIFY_IS_APPROX(V[3], Scalar(A3) ); \ + VERIFY_IS_APPROX(V[4], Scalar(A4) ); \ +} + +#define CHECK_MIXSCALAR_V5(VEC5, A0, A1, A2, A3, A4) { \ + typedef VEC5::Scalar Scalar; \ + VEC5 v = { A0 , A1 , A2 , A3 , A4 }; \ + CHECK_MIXSCALAR_V5_APPROX(v, A0 , A1 , A2 , A3 , A4); \ +} + +template<int> void fixedsizeVariadicVectorConstruction3() +{ + typedef Matrix<double,5,1> Vec5; + typedef Array<float,5,1> Arr5; + CHECK_MIXSCALAR_V5(Vec5, 1, 2., -3, 4.121, 5.53252); + CHECK_MIXSCALAR_V5(Arr5, 1, 2., 3.12f, 4.121, 5.53252); +} + +template<typename Scalar> void fixedsizeVariadicVectorConstruction() +{ + CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Matrix<Scalar,4,1>, Matrix<Scalar,5,1> >() )); + CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Matrix<Scalar,1,4>, Matrix<Scalar,1,5> >() )); + CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Array<Scalar,4,1>, Array<Scalar,5,1> >() )); + CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2<Array<Scalar,1,4>, Array<Scalar,1,5> >() )); +} + + +template<typename Scalar> void initializerListVectorConstruction() +{ + Scalar raw[4]; + for(int k = 0; k < 4; ++k) { + raw[k] = internal::random<Scalar>(); + } + { + Matrix<Scalar, 4, 1> m { {raw[0]}, {raw[1]},{raw[2]},{raw[3]} }; + Array<Scalar, 4, 1> a { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }; + 
for(int k = 0; k < 4; ++k) { + VERIFY(m(k) == raw[k]); + } + for(int k = 0; k < 4; ++k) { + VERIFY(a(k) == raw[k]); + } + VERIFY_IS_EQUAL(m, (Matrix<Scalar,4,1>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))); + VERIFY((a == (Array<Scalar,4,1>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))).all()); + } + { + Matrix<Scalar, 1, 4> m { {raw[0], raw[1], raw[2], raw[3]} }; + Array<Scalar, 1, 4> a { {raw[0], raw[1], raw[2], raw[3]} }; + for(int k = 0; k < 4; ++k) { + VERIFY(m(k) == raw[k]); + } + for(int k = 0; k < 4; ++k) { + VERIFY(a(k) == raw[k]); + } + VERIFY_IS_EQUAL(m, (Matrix<Scalar, 1, 4>({{raw[0],raw[1],raw[2],raw[3]}}))); + VERIFY((a == (Array<Scalar, 1, 4>({{raw[0],raw[1],raw[2],raw[3]}}))).all()); + } + { + Matrix<Scalar, 4, Dynamic> m { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }; + Array<Scalar, 4, Dynamic> a { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }; + for(int k=0; k < 4; ++k) { + VERIFY(m(k) == raw[k]); + } + for(int k=0; k < 4; ++k) { + VERIFY(a(k) == raw[k]); + } + VERIFY_IS_EQUAL(m, (Matrix<Scalar, 4, Dynamic>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))); + VERIFY((a == (Array<Scalar, 4, Dynamic>({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))).all()); + } + { + Matrix<Scalar, Dynamic, 4> m {{raw[0],raw[1],raw[2],raw[3]}}; + Array<Scalar, Dynamic, 4> a {{raw[0],raw[1],raw[2],raw[3]}}; + for(int k=0; k < 4; ++k) { + VERIFY(m(k) == raw[k]); + } + for(int k=0; k < 4; ++k) { + VERIFY(a(k) == raw[k]); + } + VERIFY_IS_EQUAL(m, (Matrix<Scalar, Dynamic, 4>({{raw[0],raw[1],raw[2],raw[3]}}))); + VERIFY((a == (Array<Scalar, Dynamic, 4>({{raw[0],raw[1],raw[2],raw[3]}}))).all()); + } +} + +template<typename Scalar> void initializerListMatrixConstruction() +{ + const Index RowsAtCompileTime = 5; + const Index ColsAtCompileTime = 4; + const Index SizeAtCompileTime = RowsAtCompileTime * ColsAtCompileTime; + + Scalar raw[SizeAtCompileTime]; + for (int i = 0; i < SizeAtCompileTime; ++i) { + raw[i] = internal::random<Scalar>(); + } + { + Matrix<Scalar, Dynamic, Dynamic> m {}; + 
VERIFY(m.cols() == 0); + VERIFY(m.rows() == 0); + VERIFY_IS_EQUAL(m, (Matrix<Scalar, Dynamic, Dynamic>())); + } + { + Matrix<Scalar, 5, 4> m { + {raw[0], raw[1], raw[2], raw[3]}, + {raw[4], raw[5], raw[6], raw[7]}, + {raw[8], raw[9], raw[10], raw[11]}, + {raw[12], raw[13], raw[14], raw[15]}, + {raw[16], raw[17], raw[18], raw[19]} + }; + + Matrix<Scalar, 5, 4> m2; + m2 << raw[0], raw[1], raw[2], raw[3], + raw[4], raw[5], raw[6], raw[7], + raw[8], raw[9], raw[10], raw[11], + raw[12], raw[13], raw[14], raw[15], + raw[16], raw[17], raw[18], raw[19]; + + int k = 0; + for(int i = 0; i < RowsAtCompileTime; ++i) { + for (int j = 0; j < ColsAtCompileTime; ++j) { + VERIFY(m(i, j) == raw[k]); + ++k; + } + } + VERIFY_IS_EQUAL(m, m2); + } + { + Matrix<Scalar, Dynamic, Dynamic> m{ + {raw[0], raw[1], raw[2], raw[3]}, + {raw[4], raw[5], raw[6], raw[7]}, + {raw[8], raw[9], raw[10], raw[11]}, + {raw[12], raw[13], raw[14], raw[15]}, + {raw[16], raw[17], raw[18], raw[19]} + }; + + VERIFY(m.cols() == 4); + VERIFY(m.rows() == 5); + int k = 0; + for(int i = 0; i < RowsAtCompileTime; ++i) { + for (int j = 0; j < ColsAtCompileTime; ++j) { + VERIFY(m(i, j) == raw[k]); + ++k; + } + } + + Matrix<Scalar, Dynamic, Dynamic> m2(RowsAtCompileTime, ColsAtCompileTime); + k = 0; + for(int i = 0; i < RowsAtCompileTime; ++i) { + for (int j = 0; j < ColsAtCompileTime; ++j) { + m2(i, j) = raw[k]; + ++k; + } + } + VERIFY_IS_EQUAL(m, m2); + } +} + +template<typename Scalar> void initializerListArrayConstruction() +{ + const Index RowsAtCompileTime = 5; + const Index ColsAtCompileTime = 4; + const Index SizeAtCompileTime = RowsAtCompileTime * ColsAtCompileTime; + + Scalar raw[SizeAtCompileTime]; + for (int i = 0; i < SizeAtCompileTime; ++i) { + raw[i] = internal::random<Scalar>(); + } + { + Array<Scalar, Dynamic, Dynamic> a {}; + VERIFY(a.cols() == 0); + VERIFY(a.rows() == 0); + } + { + Array<Scalar, 5, 4> m { + {raw[0], raw[1], raw[2], raw[3]}, + {raw[4], raw[5], raw[6], raw[7]}, + {raw[8], raw[9], 
raw[10], raw[11]}, + {raw[12], raw[13], raw[14], raw[15]}, + {raw[16], raw[17], raw[18], raw[19]} + }; + + Array<Scalar, 5, 4> m2; + m2 << raw[0], raw[1], raw[2], raw[3], + raw[4], raw[5], raw[6], raw[7], + raw[8], raw[9], raw[10], raw[11], + raw[12], raw[13], raw[14], raw[15], + raw[16], raw[17], raw[18], raw[19]; + + int k = 0; + for(int i = 0; i < RowsAtCompileTime; ++i) { + for (int j = 0; j < ColsAtCompileTime; ++j) { + VERIFY(m(i, j) == raw[k]); + ++k; + } + } + VERIFY_IS_APPROX(m, m2); + } + { + Array<Scalar, Dynamic, Dynamic> m { + {raw[0], raw[1], raw[2], raw[3]}, + {raw[4], raw[5], raw[6], raw[7]}, + {raw[8], raw[9], raw[10], raw[11]}, + {raw[12], raw[13], raw[14], raw[15]}, + {raw[16], raw[17], raw[18], raw[19]} + }; + + VERIFY(m.cols() == 4); + VERIFY(m.rows() == 5); + int k = 0; + for(int i = 0; i < RowsAtCompileTime; ++i) { + for (int j = 0; j < ColsAtCompileTime; ++j) { + VERIFY(m(i, j) == raw[k]); + ++k; + } + } + + Array<Scalar, Dynamic, Dynamic> m2(RowsAtCompileTime, ColsAtCompileTime); + k = 0; + for(int i = 0; i < RowsAtCompileTime; ++i) { + for (int j = 0; j < ColsAtCompileTime; ++j) { + m2(i, j) = raw[k]; + ++k; + } + } + VERIFY_IS_APPROX(m, m2); + } +} + +template<typename Scalar> void dynamicVectorConstruction() +{ + const Index size = 4; + Scalar raw[size]; + for (int i = 0; i < size; ++i) { + raw[i] = internal::random<Scalar>(); + } + + typedef Matrix<Scalar, Dynamic, 1> VectorX; + + { + VectorX v {{raw[0], raw[1], raw[2], raw[3]}}; + for (int i = 0; i < size; ++i) { + VERIFY(v(i) == raw[i]); + } + VERIFY(v.rows() == size); + VERIFY(v.cols() == 1); + VERIFY_IS_EQUAL(v, (VectorX {{raw[0], raw[1], raw[2], raw[3]}})); + } + + { + VERIFY_RAISES_ASSERT((VectorX {raw[0], raw[1], raw[2], raw[3]})); + } + { + VERIFY_RAISES_ASSERT((VectorX { + {raw[0], raw[1], raw[2], raw[3]}, + {raw[0], raw[1], raw[2], raw[3]}, + })); + } +} + +EIGEN_DECLARE_TEST(initializer_list_construction) +{ + CALL_SUBTEST_1(initializerListVectorConstruction<unsigned 
char>()); + CALL_SUBTEST_1(initializerListVectorConstruction<float>()); + CALL_SUBTEST_1(initializerListVectorConstruction<double>()); + CALL_SUBTEST_1(initializerListVectorConstruction<int>()); + CALL_SUBTEST_1(initializerListVectorConstruction<long int>()); + CALL_SUBTEST_1(initializerListVectorConstruction<std::ptrdiff_t>()); + CALL_SUBTEST_1(initializerListVectorConstruction<std::complex<double>>()); + CALL_SUBTEST_1(initializerListVectorConstruction<std::complex<float>>()); + + CALL_SUBTEST_2(initializerListMatrixConstruction<unsigned char>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<float>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<double>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<int>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<long int>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<std::ptrdiff_t>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<std::complex<double>>()); + CALL_SUBTEST_2(initializerListMatrixConstruction<std::complex<float>>()); + + CALL_SUBTEST_3(initializerListArrayConstruction<unsigned char>()); + CALL_SUBTEST_3(initializerListArrayConstruction<float>()); + CALL_SUBTEST_3(initializerListArrayConstruction<double>()); + CALL_SUBTEST_3(initializerListArrayConstruction<int>()); + CALL_SUBTEST_3(initializerListArrayConstruction<long int>()); + CALL_SUBTEST_3(initializerListArrayConstruction<std::ptrdiff_t>()); + CALL_SUBTEST_3(initializerListArrayConstruction<std::complex<double>>()); + CALL_SUBTEST_3(initializerListArrayConstruction<std::complex<float>>()); + + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<unsigned char>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<float>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<double>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<int>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<long int>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<std::ptrdiff_t>()); + 
CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<std::complex<double>>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction<std::complex<float>>()); + CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction3<0>()); + + CALL_SUBTEST_5(TestMethodDispatching<int>::run()); + CALL_SUBTEST_5(TestMethodDispatching<long int>::run()); + + CALL_SUBTEST_6(dynamicVectorConstruction<unsigned char>()); + CALL_SUBTEST_6(dynamicVectorConstruction<float>()); + CALL_SUBTEST_6(dynamicVectorConstruction<double>()); + CALL_SUBTEST_6(dynamicVectorConstruction<int>()); + CALL_SUBTEST_6(dynamicVectorConstruction<long int>()); + CALL_SUBTEST_6(dynamicVectorConstruction<std::ptrdiff_t>()); + CALL_SUBTEST_6(dynamicVectorConstruction<std::complex<double>>()); + CALL_SUBTEST_6(dynamicVectorConstruction<std::complex<float>>()); +} diff --git a/test/inplace_decomposition.cpp b/test/inplace_decomposition.cpp index 92d0d91b6..e3aa9957d 100644 --- a/test/inplace_decomposition.cpp +++ b/test/inplace_decomposition.cpp @@ -79,7 +79,7 @@ template<typename DecType,typename MatrixType> void inplace(bool square = false, } -void test_inplace_decomposition() +EIGEN_DECLARE_TEST(inplace_decomposition) { EIGEN_UNUSED typedef Matrix<double,4,3> Matrix43d; for(int i = 0; i < g_repeat; i++) { diff --git a/test/integer_types.cpp b/test/integer_types.cpp index a21f73a81..31f4100c5 100644 --- a/test/integer_types.cpp +++ b/test/integer_types.cpp @@ -18,7 +18,6 @@ template<typename MatrixType> void signed_integer_type_tests(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { is_signed = (Scalar(-1) > Scalar(0)) ? 
0 : 1 }; @@ -49,7 +48,6 @@ template<typename MatrixType> void signed_integer_type_tests(const MatrixType& m template<typename MatrixType> void integer_type_tests(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; VERIFY(NumTraits<Scalar>::IsInteger); @@ -133,7 +131,18 @@ template<typename MatrixType> void integer_type_tests(const MatrixType& m) VERIFY_IS_APPROX((m1 * m2.transpose()) * m1, m1 * (m2.transpose() * m1)); } -void test_integer_types() +template<int> +void integer_types_extra() +{ + VERIFY_IS_EQUAL(int(internal::scalar_div_cost<int>::value), 8); + VERIFY_IS_EQUAL(int(internal::scalar_div_cost<unsigned int>::value), 8); + if(sizeof(long)>sizeof(int)) { + VERIFY(int(internal::scalar_div_cost<long>::value) > int(internal::scalar_div_cost<int>::value)); + VERIFY(int(internal::scalar_div_cost<unsigned long>::value) > int(internal::scalar_div_cost<int>::value)); + } +} + +EIGEN_DECLARE_TEST(integer_types) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( integer_type_tests(Matrix<unsigned int, 1, 1>()) ); @@ -153,17 +162,12 @@ void test_integer_types() CALL_SUBTEST_6( integer_type_tests(Matrix<unsigned short, 4, 4>()) ); +#if EIGEN_HAS_CXX11 CALL_SUBTEST_7( integer_type_tests(Matrix<long long, 11, 13>()) ); CALL_SUBTEST_7( signed_integer_type_tests(Matrix<long long, 11, 13>()) ); CALL_SUBTEST_8( integer_type_tests(Matrix<unsigned long long, Dynamic, 5>(1, 5)) ); - } -#ifdef EIGEN_TEST_PART_9 - VERIFY_IS_EQUAL(internal::scalar_div_cost<int>::value, 8); - VERIFY_IS_EQUAL(internal::scalar_div_cost<unsigned int>::value, 8); - if(sizeof(long)>sizeof(int)) { - VERIFY(internal::scalar_div_cost<long>::value > internal::scalar_div_cost<int>::value); - VERIFY(internal::scalar_div_cost<unsigned long>::value > internal::scalar_div_cost<int>::value); - } #endif + } + CALL_SUBTEST_9( integer_types_extra<0>() ); } diff --git a/test/inverse.cpp b/test/inverse.cpp index 5c6777a18..9cedfa1e1 100644 --- 
a/test/inverse.cpp +++ b/test/inverse.cpp @@ -11,43 +11,26 @@ #include "main.h" #include <Eigen/LU> -template<typename MatrixType> void inverse(const MatrixType& m) +template<typename MatrixType> +void inverse_for_fixed_size(const MatrixType&, typename internal::enable_if<MatrixType::SizeAtCompileTime==Dynamic>::type* = 0) { - using std::abs; - typedef typename MatrixType::Index Index; - /* this test covers the following files: - Inverse.h - */ - Index rows = m.rows(); - Index cols = m.cols(); - - typedef typename MatrixType::Scalar Scalar; - - MatrixType m1(rows, cols), - m2(rows, cols), - identity = MatrixType::Identity(rows, rows); - createRandomPIMatrixOfRank(rows,rows,rows,m1); - m2 = m1.inverse(); - VERIFY_IS_APPROX(m1, m2.inverse() ); - - VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5)); - - VERIFY_IS_APPROX(identity, m1.inverse() * m1 ); - VERIFY_IS_APPROX(identity, m1 * m1.inverse() ); +} - VERIFY_IS_APPROX(m1, m1.inverse().inverse() ); +template<typename MatrixType> +void inverse_for_fixed_size(const MatrixType& m1, typename internal::enable_if<MatrixType::SizeAtCompileTime!=Dynamic>::type* = 0) +{ + using std::abs; - // since for the general case we implement separately row-major and col-major, test that - VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose())); + MatrixType m2, identity = MatrixType::Identity(); -#if !defined(EIGEN_TEST_PART_5) && !defined(EIGEN_TEST_PART_6) + typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType; //computeInverseAndDetWithCheck tests //First: an invertible matrix bool invertible; - RealScalar det; + Scalar det; m2.setZero(); m1.computeInverseAndDetWithCheck(m2, det, invertible); @@ -61,23 +44,52 @@ template<typename MatrixType> void inverse(const MatrixType& m) VERIFY_IS_APPROX(identity, m1*m2); //Second: a rank one matrix (not invertible, except for 1x1 
matrices) - VectorType v3 = VectorType::Random(rows); - MatrixType m3 = v3*v3.transpose(), m4(rows,cols); + VectorType v3 = VectorType::Random(); + MatrixType m3 = v3*v3.transpose(), m4; m3.computeInverseAndDetWithCheck(m4, det, invertible); - VERIFY( rows==1 ? invertible : !invertible ); + VERIFY( m1.rows()==1 ? invertible : !invertible ); VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1)); m3.computeInverseWithCheck(m4, invertible); - VERIFY( rows==1 ? invertible : !invertible ); + VERIFY( m1.rows()==1 ? invertible : !invertible ); // check with submatrices { Matrix<Scalar, MatrixType::RowsAtCompileTime+1, MatrixType::RowsAtCompileTime+1, MatrixType::Options> m5; m5.setRandom(); - m5.topLeftCorner(rows,rows) = m1; + m5.topLeftCorner(m1.rows(),m1.rows()) = m1; m2 = m5.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>().inverse(); VERIFY_IS_APPROX( (m5.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>()), m2.inverse() ); } -#endif +} + +template<typename MatrixType> void inverse(const MatrixType& m) +{ + /* this test covers the following files: + Inverse.h + */ + Index rows = m.rows(); + Index cols = m.cols(); + + typedef typename MatrixType::Scalar Scalar; + + MatrixType m1(rows, cols), + m2(rows, cols), + identity = MatrixType::Identity(rows, rows); + createRandomPIMatrixOfRank(rows,rows,rows,m1); + m2 = m1.inverse(); + VERIFY_IS_APPROX(m1, m2.inverse() ); + + VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5)); + + VERIFY_IS_APPROX(identity, m1.inverse() * m1 ); + VERIFY_IS_APPROX(identity, m1 * m1.inverse() ); + + VERIFY_IS_APPROX(m1, m1.inverse().inverse() ); + + // since for the general case we implement separately row-major and col-major, test that + VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose())); + + inverse_for_fixed_size(m1); // check in-place inversion if(MatrixType::RowsAtCompileTime>=2 && 
MatrixType::RowsAtCompileTime<=4) @@ -93,7 +105,23 @@ template<typename MatrixType> void inverse(const MatrixType& m) } } -void test_inverse() +template<typename Scalar> +void inverse_zerosized() +{ + Matrix<Scalar,Dynamic,Dynamic> A(0,0); + { + Matrix<Scalar,0,1> b, x; + x = A.inverse() * b; + } + { + Matrix<Scalar,Dynamic,Dynamic> b(0,1), x; + x = A.inverse() * b; + VERIFY_IS_EQUAL(x.rows(), 0); + VERIFY_IS_EQUAL(x.cols(), 1); + } +} + +EIGEN_DECLARE_TEST(inverse) { int s = 0; for(int i = 0; i < g_repeat; i++) { @@ -106,6 +134,9 @@ void test_inverse() s = internal::random<int>(50,320); CALL_SUBTEST_5( inverse(MatrixXf(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) + CALL_SUBTEST_5( inverse_zerosized<float>() ); + CALL_SUBTEST_5( inverse(MatrixXf(0, 0)) ); + CALL_SUBTEST_5( inverse(MatrixXf(1, 1)) ); s = internal::random<int>(25,100); CALL_SUBTEST_6( inverse(MatrixXcd(s,s)) ); @@ -113,5 +144,7 @@ void test_inverse() CALL_SUBTEST_7( inverse(Matrix4d()) ); CALL_SUBTEST_7( inverse(Matrix<double,4,4,DontAlign>()) ); + + CALL_SUBTEST_8( inverse(Matrix4cd()) ); } } diff --git a/test/io.cpp b/test/io.cpp new file mode 100644 index 000000000..aa14e76e9 --- /dev/null +++ b/test/io.cpp @@ -0,0 +1,71 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 Joel Holdsworth <joel.holdsworth@vcatechnology.com> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include <sstream> + +#include "main.h" + +template<typename Scalar> +struct check_ostream_impl +{ + static void run() + { + const Array<Scalar,1,1> array(123); + std::ostringstream ss; + ss << array; + VERIFY(ss.str() == "123"); + + check_ostream_impl< std::complex<Scalar> >::run(); + }; +}; + +template<> +struct check_ostream_impl<bool> +{ + static void run() + { + const Array<bool,1,2> array(1, 0); + std::ostringstream ss; + ss << array; + VERIFY(ss.str() == "1 0"); + }; +}; + +template<typename Scalar> +struct check_ostream_impl< std::complex<Scalar> > +{ + static void run() + { + const Array<std::complex<Scalar>,1,1> array(std::complex<Scalar>(12, 34)); + std::ostringstream ss; + ss << array; + VERIFY(ss.str() == "(12,34)"); + }; +}; + +template<typename Scalar> +static void check_ostream() +{ + check_ostream_impl<Scalar>::run(); +} + +EIGEN_DECLARE_TEST(rand) +{ + CALL_SUBTEST(check_ostream<bool>()); + CALL_SUBTEST(check_ostream<float>()); + CALL_SUBTEST(check_ostream<double>()); + CALL_SUBTEST(check_ostream<Eigen::numext::int8_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::uint8_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::int16_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::uint16_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::int32_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::uint32_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::int64_t>()); + CALL_SUBTEST(check_ostream<Eigen::numext::uint64_t>()); +} diff --git a/test/is_same_dense.cpp b/test/is_same_dense.cpp index 2c7838ce9..23dd806eb 100644 --- a/test/is_same_dense.cpp +++ b/test/is_same_dense.cpp @@ -11,12 +11,16 @@ using internal::is_same_dense; -void test_is_same_dense() +EIGEN_DECLARE_TEST(is_same_dense) { typedef Matrix<double,Dynamic,Dynamic,ColMajor> ColMatrixXd; + typedef Matrix<std::complex<double>,Dynamic,Dynamic,ColMajor> ColMatrixXcd; ColMatrixXd m1(10,10); + ColMatrixXcd m2(10,10); Ref<ColMatrixXd> ref_m1(m1); + Ref<ColMatrixXd,0, Stride<Dynamic,Dynamic> > 
ref_m2_real(m2.real()); Ref<const ColMatrixXd> const_ref_m1(m1); + VERIFY(is_same_dense(m1,m1)); VERIFY(is_same_dense(m1,ref_m1)); VERIFY(is_same_dense(const_ref_m1,m1)); @@ -30,4 +34,8 @@ void test_is_same_dense() Ref<const ColMatrixXd> const_ref_m1_col(m1.col(1)); VERIFY(is_same_dense(m1.col(1),const_ref_m1_col)); + + + VERIFY(!is_same_dense(m1, ref_m2_real)); + VERIFY(!is_same_dense(m2, ref_m2_real)); } diff --git a/test/jacobi.cpp b/test/jacobi.cpp index 7ccd4124b..5604797f5 100644 --- a/test/jacobi.cpp +++ b/test/jacobi.cpp @@ -14,7 +14,6 @@ template<typename MatrixType, typename JacobiScalar> void jacobi(const MatrixType& m = MatrixType()) { - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); @@ -58,7 +57,7 @@ void jacobi(const MatrixType& m = MatrixType()) } } -void test_jacobi() +EIGEN_DECLARE_TEST(jacobi) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( jacobi<Matrix3f, float>() )); diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp index 7f5f71562..5b15c5a27 100644 --- a/test/jacobisvd.cpp +++ b/test/jacobisvd.cpp @@ -36,7 +36,9 @@ void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m) { svd_verify_assert<JacobiSVD<MatrixType> >(m); - typedef typename MatrixType::Index Index; + svd_verify_assert<JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> >(m, true); + svd_verify_assert<JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner> >(m); + svd_verify_assert<JacobiSVD<MatrixType, HouseholderQRPreconditioner> >(m); Index rows = m.rows(); Index cols = m.cols(); @@ -68,9 +70,26 @@ void jacobisvd_method() VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU()); VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV()); VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m); + VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); + 
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); } -void test_jacobisvd() +namespace Foo { +// older compiler require a default constructor for Bar +// cf: https://stackoverflow.com/questions/7411515/ +class Bar {public: Bar() {}}; +bool operator<(const Bar&, const Bar&) { return true; } +} +// regression test for a very strange MSVC issue for which simply +// including SVDBase.h messes up with std::max and custom scalar type +void msvc_workaround() +{ + const Foo::Bar a; + const Foo::Bar b; + std::max EIGEN_NOT_A_MACRO (a,b); +} + +EIGEN_DECLARE_TEST(jacobisvd) { CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) )); CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) )); @@ -123,4 +142,6 @@ void test_jacobisvd() CALL_SUBTEST_9( svd_preallocate<void>() ); CALL_SUBTEST_2( svd_underoverflow<void>() ); + + msvc_workaround(); } diff --git a/test/klu_support.cpp b/test/klu_support.cpp new file mode 100644 index 000000000..f806ad50e --- /dev/null +++ b/test/klu_support.cpp @@ -0,0 +1,32 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS +#include "sparse_solver.h" + +#include <Eigen/KLUSupport> + +template<typename T> void test_klu_support_T() +{ + KLU<SparseMatrix<T, ColMajor> > klu_colmajor; + KLU<SparseMatrix<T, RowMajor> > klu_rowmajor; + + check_sparse_square_solving(klu_colmajor); + check_sparse_square_solving(klu_rowmajor); + + //check_sparse_square_determinant(umfpack_colmajor); + //check_sparse_square_determinant(umfpack_rowmajor); +} + +EIGEN_DECLARE_TEST(klu_support) +{ + CALL_SUBTEST_1(test_klu_support_T<double>()); + CALL_SUBTEST_2(test_klu_support_T<std::complex<double> >()); +} + diff --git a/test/linearstructure.cpp b/test/linearstructure.cpp index 17474af10..46ee5162b 100644 --- a/test/linearstructure.cpp +++ b/test/linearstructure.cpp @@ -19,7 +19,6 @@ template<typename MatrixType> void linearStructure(const MatrixType& m) /* this test covers the following files: CwiseUnaryOp.h, CwiseBinaryOp.h, SelfCwiseBinaryOp.h */ - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -111,7 +110,20 @@ template<typename MatrixType> void real_complex(DenseIndex rows = MatrixType::Ro VERIFY(g_called && "matrix<complex> - real not properly optimized"); } -void test_linearstructure() +template<int> +void linearstructure_overflow() +{ + // make sure that /=scalar and /scalar do not overflow + // rational: 1.0/4.94e-320 overflow, but m/4.94e-320 should not + Matrix4d m2, m3; + m3 = m2 = Matrix4d::Random()*1e-20; + m2 = m2 / 4.9e-320; + VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones()); + m3 /= 4.9e-320; + VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones()); +} + +EIGEN_DECLARE_TEST(linearstructure) { g_called = true; VERIFY(g_called); // avoid `unneeded-internal-declaration` warning. 
@@ -131,19 +143,5 @@ void test_linearstructure() CALL_SUBTEST_11( real_complex<MatrixXcf>(10,10) ); CALL_SUBTEST_11( real_complex<ArrayXXcf>(10,10) ); } - -#ifdef EIGEN_TEST_PART_4 - { - // make sure that /=scalar and /scalar do not overflow - // rational: 1.0/4.94e-320 overflow, but m/4.94e-320 should not - Matrix4d m2, m3; - m3 = m2 = Matrix4d::Random()*1e-20; - m2 = m2 / 4.9e-320; - VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones()); - m3 /= 4.9e-320; - VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones()); - - - } -#endif + CALL_SUBTEST_4( linearstructure_overflow<0>() ); } diff --git a/test/lscg.cpp b/test/lscg.cpp index d49ee00c3..feb2347a8 100644 --- a/test/lscg.cpp +++ b/test/lscg.cpp @@ -30,7 +30,7 @@ template<typename T> void test_lscg_T() CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_rowmajor_I) ); } -void test_lscg() +EIGEN_DECLARE_TEST(lscg) { CALL_SUBTEST_1(test_lscg_T<double>()); CALL_SUBTEST_2(test_lscg_T<std::complex<double> >()); diff --git a/test/lu.cpp b/test/lu.cpp index 9787f4d86..1bbadcbf0 100644 --- a/test/lu.cpp +++ b/test/lu.cpp @@ -9,6 +9,7 @@ #include "main.h" #include <Eigen/LU> +#include "solverbase.h" using namespace std; template<typename MatrixType> @@ -18,7 +19,8 @@ typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { template<typename MatrixType> void lu_non_invertible() { - typedef typename MatrixType::Index Index; + STATIC_CHECK(( internal::is_same<typename FullPivLU<MatrixType>::StorageIndex,int>::value )); + typedef typename MatrixType::RealScalar RealScalar; /* this test covers the following files: LU.h @@ -58,6 +60,10 @@ template<typename MatrixType> void lu_non_invertible() // The image of the zero matrix should consist of a single (zero) column vector VERIFY((MatrixType::Zero(rows,cols).fullPivLu().image(MatrixType::Zero(rows,cols)).cols() == 1)); + // The kernel of the zero matrix is the entire space, and thus is an invertible matrix of dimensions cols. 
+ KernelMatrixType kernel = MatrixType::Zero(rows,cols).fullPivLu().kernel(); + VERIFY((kernel.fullPivLu().isInvertible())); + MatrixType m1(rows, cols), m3(rows, cols2); CMatrixType m2(cols, cols2); createRandomPIMatrixOfRank(rank, rows, cols, m1); @@ -87,42 +93,24 @@ template<typename MatrixType> void lu_non_invertible() VERIFY(!lu.isInjective()); VERIFY(!lu.isInvertible()); VERIFY(!lu.isSurjective()); - VERIFY((m1 * m1kernel).isMuchSmallerThan(m1)); + VERIFY_IS_MUCH_SMALLER_THAN((m1 * m1kernel), m1); VERIFY(m1image.fullPivLu().rank() == rank); VERIFY_IS_APPROX(m1 * m1.adjoint() * m1image, m1image); + check_solverbase<CMatrixType, MatrixType>(m1, lu, rows, cols, cols2); + m2 = CMatrixType::Random(cols,cols2); m3 = m1*m2; m2 = CMatrixType::Random(cols,cols2); // test that the code, which does resize(), may be applied to an xpr m2.block(0,0,m2.rows(),m2.cols()) = lu.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); - - // test solve with transposed - m3 = MatrixType::Random(rows,cols2); - m2 = m1.transpose()*m3; - m3 = MatrixType::Random(rows,cols2); - lu.template _solve_impl_transposed<false>(m2, m3); - VERIFY_IS_APPROX(m2, m1.transpose()*m3); - m3 = MatrixType::Random(rows,cols2); - m3 = lu.transpose().solve(m2); - VERIFY_IS_APPROX(m2, m1.transpose()*m3); - - // test solve with conjugate transposed - m3 = MatrixType::Random(rows,cols2); - m2 = m1.adjoint()*m3; - m3 = MatrixType::Random(rows,cols2); - lu.template _solve_impl_transposed<true>(m2, m3); - VERIFY_IS_APPROX(m2, m1.adjoint()*m3); - m3 = MatrixType::Random(rows,cols2); - m3 = lu.adjoint().solve(m2); - VERIFY_IS_APPROX(m2, m1.adjoint()*m3); } template<typename MatrixType> void lu_invertible() { /* this test covers the following files: - LU.h + FullPivLU.h */ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; Index size = MatrixType::RowsAtCompileTime; @@ -145,10 +133,12 @@ template<typename MatrixType> void lu_invertible() VERIFY(lu.isSurjective()); VERIFY(lu.isInvertible()); 
VERIFY(lu.image(m1).fullPivLu().isInvertible()); + + check_solverbase<MatrixType, MatrixType>(m1, lu, size, size, size); + + MatrixType m1_inverse = lu.inverse(); m3 = MatrixType::Random(size,size); m2 = lu.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); - MatrixType m1_inverse = lu.inverse(); VERIFY_IS_APPROX(m2, m1_inverse*m3); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); @@ -157,64 +147,37 @@ template<typename MatrixType> void lu_invertible() // truth. VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); - // test solve with transposed - lu.template _solve_impl_transposed<false>(m3, m2); - VERIFY_IS_APPROX(m3, m1.transpose()*m2); - m3 = MatrixType::Random(size,size); - m3 = lu.transpose().solve(m2); - VERIFY_IS_APPROX(m2, m1.transpose()*m3); - - // test solve with conjugate transposed - lu.template _solve_impl_transposed<true>(m3, m2); - VERIFY_IS_APPROX(m3, m1.adjoint()*m2); - m3 = MatrixType::Random(size,size); - m3 = lu.adjoint().solve(m2); - VERIFY_IS_APPROX(m2, m1.adjoint()*m3); - // Regression test for Bug 302 MatrixType m4 = MatrixType::Random(size,size); VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4); } -template<typename MatrixType> void lu_partial_piv() +template<typename MatrixType> void lu_partial_piv(Index size = MatrixType::ColsAtCompileTime) { /* this test covers the following files: PartialPivLU.h */ - typedef typename MatrixType::Index Index; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; - Index size = internal::random<Index>(1,4); MatrixType m1(size, size), m2(size, size), m3(size, size); m1.setRandom(); PartialPivLU<MatrixType> plu(m1); + STATIC_CHECK(( internal::is_same<typename PartialPivLU<MatrixType>::StorageIndex,int>::value )); + VERIFY_IS_APPROX(m1, plu.reconstructedMatrix()); + check_solverbase<MatrixType, MatrixType>(m1, plu, size, size, size); + + MatrixType m1_inverse = plu.inverse(); m3 = MatrixType::Random(size,size); m2 = plu.solve(m3); - VERIFY_IS_APPROX(m3, 
m1*m2); - MatrixType m1_inverse = plu.inverse(); VERIFY_IS_APPROX(m2, m1_inverse*m3); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); const RealScalar rcond_est = plu.rcond(); // Verify that the estimate is within a factor of 10 of the truth. VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); - - // test solve with transposed - plu.template _solve_impl_transposed<false>(m3, m2); - VERIFY_IS_APPROX(m3, m1.transpose()*m2); - m3 = MatrixType::Random(size,size); - m3 = plu.transpose().solve(m2); - VERIFY_IS_APPROX(m2, m1.transpose()*m3); - - // test solve with conjugate transposed - plu.template _solve_impl_transposed<true>(m3, m2); - VERIFY_IS_APPROX(m3, m1.adjoint()*m2); - m3 = MatrixType::Random(size,size); - m3 = plu.adjoint().solve(m2); - VERIFY_IS_APPROX(m2, m1.adjoint()*m3); } template<typename MatrixType> void lu_verify_assert() @@ -228,6 +191,8 @@ template<typename MatrixType> void lu_verify_assert() VERIFY_RAISES_ASSERT(lu.kernel()) VERIFY_RAISES_ASSERT(lu.image(tmp)) VERIFY_RAISES_ASSERT(lu.solve(tmp)) + VERIFY_RAISES_ASSERT(lu.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(lu.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(lu.determinant()) VERIFY_RAISES_ASSERT(lu.rank()) VERIFY_RAISES_ASSERT(lu.dimensionOfKernel()) @@ -240,19 +205,25 @@ template<typename MatrixType> void lu_verify_assert() VERIFY_RAISES_ASSERT(plu.matrixLU()) VERIFY_RAISES_ASSERT(plu.permutationP()) VERIFY_RAISES_ASSERT(plu.solve(tmp)) + VERIFY_RAISES_ASSERT(plu.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(plu.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(plu.determinant()) VERIFY_RAISES_ASSERT(plu.inverse()) } -void test_lu() +EIGEN_DECLARE_TEST(lu) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( lu_non_invertible<Matrix3f>() ); CALL_SUBTEST_1( lu_invertible<Matrix3f>() ); CALL_SUBTEST_1( lu_verify_assert<Matrix3f>() ); + CALL_SUBTEST_1( lu_partial_piv<Matrix3f>() ); CALL_SUBTEST_2( (lu_non_invertible<Matrix<double, 4, 6> >()) ); 
CALL_SUBTEST_2( (lu_verify_assert<Matrix<double, 4, 6> >()) ); + CALL_SUBTEST_2( lu_partial_piv<Matrix2d>() ); + CALL_SUBTEST_2( lu_partial_piv<Matrix4d>() ); + CALL_SUBTEST_2( (lu_partial_piv<Matrix<double,6,6> >()) ); CALL_SUBTEST_3( lu_non_invertible<MatrixXf>() ); CALL_SUBTEST_3( lu_invertible<MatrixXf>() ); @@ -260,7 +231,7 @@ void test_lu() CALL_SUBTEST_4( lu_non_invertible<MatrixXd>() ); CALL_SUBTEST_4( lu_invertible<MatrixXd>() ); - CALL_SUBTEST_4( lu_partial_piv<MatrixXd>() ); + CALL_SUBTEST_4( lu_partial_piv<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ); CALL_SUBTEST_4( lu_verify_assert<MatrixXd>() ); CALL_SUBTEST_5( lu_non_invertible<MatrixXcf>() ); @@ -269,7 +240,7 @@ void test_lu() CALL_SUBTEST_6( lu_non_invertible<MatrixXcd>() ); CALL_SUBTEST_6( lu_invertible<MatrixXcd>() ); - CALL_SUBTEST_6( lu_partial_piv<MatrixXcd>() ); + CALL_SUBTEST_6( lu_partial_piv<MatrixXcd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ); CALL_SUBTEST_6( lu_verify_assert<MatrixXcd>() ); CALL_SUBTEST_7(( lu_non_invertible<Matrix<float,Dynamic,16> >() )); diff --git a/test/main.h b/test/main.h index bd5325196..07f3794ac 100644 --- a/test/main.h +++ b/test/main.h @@ -1,3 +1,4 @@ + // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // @@ -17,6 +18,7 @@ #include <sstream> #include <vector> #include <typeinfo> +#include <functional> // The following includes of STL headers have to be done _before_ the // definition of macros min() and max(). The reason is that many STL @@ -38,27 +40,66 @@ // definitions. #include <limits> #include <algorithm> +// Disable ICC's std::complex operator specializations so we can use our own. 
+#define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1 #include <complex> #include <deque> #include <queue> #include <cassert> #include <list> -#if __cplusplus >= 201103L +#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) #include <random> +#include <chrono> #ifdef EIGEN_USE_THREADS #include <future> #endif #endif +// Same for cuda_fp16.h +#if defined(__CUDACC__) && !defined(EIGEN_NO_CUDA) + // Means the compiler is either nvcc or clang with CUDA enabled + #define EIGEN_CUDACC __CUDACC__ +#endif +#if defined(EIGEN_CUDACC) +#include <cuda.h> + #define EIGEN_CUDA_SDK_VER (CUDA_VERSION * 10) +#else + #define EIGEN_CUDA_SDK_VER 0 +#endif +#if EIGEN_CUDA_SDK_VER >= 70500 +#include <cuda_fp16.h> +#endif + // To test that all calls from Eigen code to std::min() and std::max() are // protected by parenthesis against macro expansion, the min()/max() macros // are defined here and any not-parenthesized min/max call will cause a // compiler error. -#define min(A,B) please_protect_your_min_with_parentheses -#define max(A,B) please_protect_your_max_with_parentheses -#define isnan(X) please_protect_your_isnan_with_parentheses -#define isinf(X) please_protect_your_isinf_with_parentheses -#define isfinite(X) please_protect_your_isfinite_with_parentheses +#if !defined(__HIPCC__) && !defined(EIGEN_USE_SYCL) + // + // HIP header files include the following files + // <thread> + // <regex> + // <unordered_map> + // which seem to contain not-parenthesized calls to "max"/"min", triggering the following check and causing the compile to fail + // + // Including those header files before the following macro definition for "min" / "max", only partially resolves the issue + // This is because other HIP header files also define "isnan" / "isinf" / "isfinite" functions, which are needed in other + // headers. 
+ // + // So instead choosing to simply disable this check for HIP + // + #define min(A,B) please_protect_your_min_with_parentheses + #define max(A,B) please_protect_your_max_with_parentheses + #define isnan(X) please_protect_your_isnan_with_parentheses + #define isinf(X) please_protect_your_isinf_with_parentheses + #define isfinite(X) please_protect_your_isfinite_with_parentheses +#endif + + +// test possible conflicts +struct real {}; +struct imag {}; + #ifdef M_PI #undef M_PI #endif @@ -67,6 +108,8 @@ #define FORBIDDEN_IDENTIFIER (this_identifier_is_forbidden_to_avoid_clashes) this_identifier_is_forbidden_to_avoid_clashes // B0 is defined in POSIX header termios.h #define B0 FORBIDDEN_IDENTIFIER +// `I` may be defined by complex.h: +#define I FORBIDDEN_IDENTIFIER // Unit tests calling Eigen's blas library must preserve the default blocking size // to avoid troubles. @@ -93,13 +136,12 @@ inline void on_temporary_creation(long int size) { #define VERIFY_EVALUATION_COUNT(XPR,N) {\ nb_temporaries = 0; \ XPR; \ - if(nb_temporaries!=N) { std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; }\ - VERIFY( (#XPR) && nb_temporaries==N ); \ + if(nb_temporaries!=(N)) { std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; }\ + VERIFY( (#XPR) && nb_temporaries==(N) ); \ } - + #endif -// the following file is automatically generated by cmake #include "split_test_helper.h" #ifdef NDEBUG @@ -116,10 +158,6 @@ inline void on_temporary_creation(long int size) { #define EIGEN_MAKING_DOCS #endif -#ifndef EIGEN_TEST_FUNC -#error EIGEN_TEST_FUNC must be defined -#endif - #define DEFAULT_REPEAT 10 namespace Eigen @@ -128,20 +166,50 @@ namespace Eigen // level == 0 <=> abort if test fail // level >= 1 <=> warning message to std::cerr if test fail static int g_test_level = 0; - static int g_repeat; - static unsigned int g_seed; - static bool g_has_set_repeat, g_has_set_seed; + static int g_repeat = 1; + static unsigned int g_seed = 0; + static bool g_has_set_repeat = false, 
g_has_set_seed = false; + + class EigenTest + { + public: + EigenTest() : m_func(0) {} + EigenTest(const char* a_name, void (*func)(void)) + : m_name(a_name), m_func(func) + { + get_registered_tests().push_back(this); + } + const std::string& name() const { return m_name; } + void operator()() const { m_func(); } + + static const std::vector<EigenTest*>& all() { return get_registered_tests(); } + protected: + static std::vector<EigenTest*>& get_registered_tests() + { + static std::vector<EigenTest*>* ms_registered_tests = new std::vector<EigenTest*>(); + return *ms_registered_tests; + } + std::string m_name; + void (*m_func)(void); + }; + + // Declare and register a test, e.g.: + // EIGEN_DECLARE_TEST(mytest) { ... } + // will create a function: + // void test_mytest() { ... } + // that will be automatically called. + #define EIGEN_DECLARE_TEST(X) \ + void EIGEN_CAT(test_,X) (); \ + static EigenTest EIGEN_CAT(test_handler_,X) (EIGEN_MAKESTRING(X), & EIGEN_CAT(test_,X)); \ + void EIGEN_CAT(test_,X) () } #define TRACK std::cerr << __FILE__ << " " << __LINE__ << std::endl // #define TRACK while() -#define EI_PP_MAKE_STRING2(S) #S -#define EI_PP_MAKE_STRING(S) EI_PP_MAKE_STRING2(S) - #define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "") -#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) +#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__) && !defined(__SYCL_DEVICE_ONLY__) #define EIGEN_EXCEPTIONS #endif @@ -162,9 +230,15 @@ namespace Eigen eigen_assert_exception(void) {} ~eigen_assert_exception() { Eigen::no_more_assert = false; } }; + + struct eigen_static_assert_exception + { + eigen_static_assert_exception(void) {} + ~eigen_static_assert_exception() { Eigen::no_more_assert = false; } + }; } // If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while - // one should have been, then the list of excecuted assertions is printed out. 
+ // one should have been, then the list of executed assertions is printed out. // // EIGEN_DEBUG_ASSERTS is not enabled by default as it // significantly increases the compilation time @@ -190,7 +264,7 @@ namespace Eigen } \ else if (Eigen::internal::push_assert) \ { \ - eigen_assert_list.push_back(std::string(EI_PP_MAKE_STRING(__FILE__) " (" EI_PP_MAKE_STRING(__LINE__) ") : " #a) ); \ + eigen_assert_list.push_back(std::string(EIGEN_MAKESTRING(__FILE__) " (" EIGEN_MAKESTRING(__LINE__) ") : " #a) ); \ } #ifdef EIGEN_EXCEPTIONS @@ -214,7 +288,7 @@ namespace Eigen } #endif //EIGEN_EXCEPTIONS - #elif !defined(__CUDACC__) // EIGEN_DEBUG_ASSERTS + #elif !defined(__CUDACC__) && !defined(__HIPCC__) && !defined(SYCL_DEVICE_ONLY) // EIGEN_DEBUG_ASSERTS // see bug 89. The copy_bool here is working around a bug in gcc <= 4.3 #define eigen_assert(a) \ if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\ @@ -225,6 +299,7 @@ namespace Eigen else \ EIGEN_THROW_X(Eigen::eigen_assert_exception()); \ } + #ifdef EIGEN_EXCEPTIONS #define VERIFY_RAISES_ASSERT(a) { \ Eigen::no_more_assert = false; \ @@ -236,25 +311,51 @@ namespace Eigen catch (Eigen::eigen_assert_exception&) { VERIFY(true); } \ Eigen::report_on_cerr_on_assert_failure = true; \ } - #endif //EIGEN_EXCEPTIONS + #endif // EIGEN_EXCEPTIONS #endif // EIGEN_DEBUG_ASSERTS + #if defined(TEST_CHECK_STATIC_ASSERTIONS) && defined(EIGEN_EXCEPTIONS) + #define EIGEN_STATIC_ASSERT(a,MSG) \ + if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\ + { \ + Eigen::no_more_assert = true; \ + if(report_on_cerr_on_assert_failure) \ + eigen_plain_assert((a) && #MSG); \ + else \ + EIGEN_THROW_X(Eigen::eigen_static_assert_exception()); \ + } + #define VERIFY_RAISES_STATIC_ASSERT(a) { \ + Eigen::no_more_assert = false; \ + Eigen::report_on_cerr_on_assert_failure = false; \ + try { \ + a; \ + VERIFY(Eigen::should_raise_an_assert && # a); \ + } \ + catch (Eigen::eigen_static_assert_exception&) { VERIFY(true); } \ + 
Eigen::report_on_cerr_on_assert_failure = true; \ + } + #endif // TEST_CHECK_STATIC_ASSERTIONS + #ifndef VERIFY_RAISES_ASSERT #define VERIFY_RAISES_ASSERT(a) \ std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled\n"; #endif - - #if !defined(__CUDACC__) +#ifndef VERIFY_RAISES_STATIC_ASSERT + #define VERIFY_RAISES_STATIC_ASSERT(a) \ + std::cout << "Can't VERIFY_RAISES_STATIC_ASSERT( " #a " ) with exceptions disabled\n"; +#endif + + #if !defined(__CUDACC__) && !defined(__HIPCC__) && !defined(SYCL_DEVICE_ONLY) #define EIGEN_USE_CUSTOM_ASSERT #endif #else // EIGEN_NO_ASSERTION_CHECKING #define VERIFY_RAISES_ASSERT(a) {} + #define VERIFY_RAISES_STATIC_ASSERT(a) {} #endif // EIGEN_NO_ASSERTION_CHECKING - #define EIGEN_INTERNAL_DEBUGGING #include <Eigen/QR> // required for createRandomPIMatrixOfRank @@ -276,10 +377,10 @@ inline void verify_impl(bool condition, const char *testname, const char *file, } } -#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a)) +#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a)) -#define VERIFY_GE(a, b) ::verify_impl(a >= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a >= b)) -#define VERIFY_LE(a, b) ::verify_impl(a <= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a <= b)) +#define VERIFY_GE(a, b) ::verify_impl(a >= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a >= b)) +#define VERIFY_LE(a, b) ::verify_impl(a <= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a <= b)) #define VERIFY_IS_EQUAL(a, b) VERIFY(test_is_equal(a, b, true)) @@ -293,8 +394,10 @@ inline void verify_impl(bool condition, const char *testname, const char *file, #define VERIFY_IS_UNITARY(a) VERIFY(test_isUnitary(a)) +#define STATIC_CHECK(COND) EIGEN_STATIC_ASSERT( (COND) , EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT ) + #define CALL_SUBTEST(FUNC) do { \ - 
g_test_stack.push_back(EI_PP_MAKE_STRING(FUNC)); \ + g_test_stack.push_back(EIGEN_MAKESTRING(FUNC)); \ FUNC; \ g_test_stack.pop_back(); \ } while (0) @@ -302,6 +405,13 @@ inline void verify_impl(bool condition, const char *testname, const char *file, namespace Eigen { +template<typename T1,typename T2> +typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type +is_same_type(const T1&, const T2&) +{ + return true; +} + template<typename T> inline typename NumTraits<T>::Real test_precision() { return NumTraits<T>::dummy_precision(); } template<> inline float test_precision<float>() { return 1e-3f; } template<> inline double test_precision<double>() { return 1e-6; } @@ -310,37 +420,30 @@ template<> inline float test_precision<std::complex<float> >() { return test_pre template<> inline double test_precision<std::complex<double> >() { return test_precision<double>(); } template<> inline long double test_precision<std::complex<long double> >() { return test_precision<long double>(); } -inline bool test_isApprox(const short& a, const short& b) -{ return internal::isApprox(a, b, test_precision<short>()); } -inline bool test_isApprox(const unsigned short& a, const unsigned short& b) -{ return internal::isApprox(a, b, test_precision<unsigned long>()); } -inline bool test_isApprox(const unsigned int& a, const unsigned int& b) -{ return internal::isApprox(a, b, test_precision<unsigned int>()); } -inline bool test_isApprox(const long& a, const long& b) -{ return internal::isApprox(a, b, test_precision<long>()); } -inline bool test_isApprox(const unsigned long& a, const unsigned long& b) -{ return internal::isApprox(a, b, test_precision<unsigned long>()); } - -inline bool test_isApprox(const int& a, const int& b) -{ return internal::isApprox(a, b, test_precision<int>()); } -inline bool test_isMuchSmallerThan(const int& a, const int& b) -{ return internal::isMuchSmallerThan(a, b, test_precision<int>()); } -inline bool test_isApproxOrLessThan(const int& a, const 
int& b) -{ return internal::isApproxOrLessThan(a, b, test_precision<int>()); } - -inline bool test_isApprox(const float& a, const float& b) -{ return internal::isApprox(a, b, test_precision<float>()); } -inline bool test_isMuchSmallerThan(const float& a, const float& b) -{ return internal::isMuchSmallerThan(a, b, test_precision<float>()); } -inline bool test_isApproxOrLessThan(const float& a, const float& b) -{ return internal::isApproxOrLessThan(a, b, test_precision<float>()); } - -inline bool test_isApprox(const double& a, const double& b) -{ return internal::isApprox(a, b, test_precision<double>()); } -inline bool test_isMuchSmallerThan(const double& a, const double& b) -{ return internal::isMuchSmallerThan(a, b, test_precision<double>()); } -inline bool test_isApproxOrLessThan(const double& a, const double& b) -{ return internal::isApproxOrLessThan(a, b, test_precision<double>()); } +#define EIGEN_TEST_SCALAR_TEST_OVERLOAD(TYPE) \ + inline bool test_isApprox(TYPE a, TYPE b) \ + { return internal::isApprox(a, b, test_precision<TYPE>()); } \ + inline bool test_isMuchSmallerThan(TYPE a, TYPE b) \ + { return internal::isMuchSmallerThan(a, b, test_precision<TYPE>()); } \ + inline bool test_isApproxOrLessThan(TYPE a, TYPE b) \ + { return internal::isApproxOrLessThan(a, b, test_precision<TYPE>()); } + +EIGEN_TEST_SCALAR_TEST_OVERLOAD(short) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned short) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(int) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned int) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(long) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned long) +#if EIGEN_HAS_CXX11 +EIGEN_TEST_SCALAR_TEST_OVERLOAD(long long) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned long long) +#endif +EIGEN_TEST_SCALAR_TEST_OVERLOAD(float) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(double) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(half) +EIGEN_TEST_SCALAR_TEST_OVERLOAD(bfloat16) + +#undef EIGEN_TEST_SCALAR_TEST_OVERLOAD #ifndef EIGEN_TEST_NO_COMPLEX inline bool test_isApprox(const std::complex<float>& a, 
const std::complex<float>& b) @@ -377,13 +480,6 @@ inline bool test_isApproxOrLessThan(const long double& a, const long double& b) { return internal::isApproxOrLessThan(a, b, test_precision<long double>()); } #endif // EIGEN_TEST_NO_LONGDOUBLE -inline bool test_isApprox(const half& a, const half& b) -{ return internal::isApprox(a, b, test_precision<half>()); } -inline bool test_isMuchSmallerThan(const half& a, const half& b) -{ return internal::isMuchSmallerThan(a, b, test_precision<half>()); } -inline bool test_isApproxOrLessThan(const half& a, const half& b) -{ return internal::isApproxOrLessThan(a, b, test_precision<half>()); } - // test_relative_error returns the relative difference between a and b as a real scalar as used in isApprox. template<typename T1,typename T2> typename NumTraits<typename T1::RealScalar>::NonInteger test_relative_error(const EigenBase<T1> &a, const EigenBase<T2> &b) @@ -450,7 +546,7 @@ template<typename T1,typename T2> typename NumTraits<typename NumTraits<T1>::Real>::NonInteger test_relative_error(const T1 &a, const T2 &b, typename internal::enable_if<internal::is_arithmetic<typename NumTraits<T1>::Real>::value, T1>::type* = 0) { typedef typename NumTraits<typename NumTraits<T1>::Real>::NonInteger RealScalar; - return numext::sqrt(RealScalar(numext::abs2(a-b))/RealScalar((numext::mini)(numext::abs2(a),numext::abs2(b)))); + return numext::sqrt(RealScalar(numext::abs2(a-b))/(numext::mini)(RealScalar(numext::abs2(a)),RealScalar(numext::abs2(b)))); } template<typename T> @@ -645,9 +741,6 @@ template<> std::string type_name<std::complex<double> >() { return "comple template<> std::string type_name<std::complex<long double> >() { return "complex<long double>"; } template<> std::string type_name<std::complex<int> >() { return "complex<int>"; } -// forward declaration of the main test function -void EIGEN_CAT(test_,EIGEN_TEST_FUNC)(); - using namespace Eigen; inline void set_repeat_from_string(const char *str) @@ -734,9 +827,16 @@ int main(int 
argc, char *argv[]) srand(g_seed); std::cout << "Repeating each test " << g_repeat << " times" << std::endl; - Eigen::g_test_stack.push_back(std::string(EI_PP_MAKE_STRING(EIGEN_TEST_FUNC))); + VERIFY(EigenTest::all().size()>0); + + for(std::size_t i=0; i<EigenTest::all().size(); ++i) + { + const EigenTest& current_test = *EigenTest::all()[i]; + Eigen::g_test_stack.push_back(current_test.name()); + current_test(); + Eigen::g_test_stack.pop_back(); + } - EIGEN_CAT(test_,EIGEN_TEST_FUNC)(); return 0; } diff --git a/test/mapped_matrix.cpp b/test/mapped_matrix.cpp index 6a84c5897..0ea136ae6 100644 --- a/test/mapped_matrix.cpp +++ b/test/mapped_matrix.cpp @@ -17,7 +17,6 @@ template<typename VectorType> void map_class_vector(const VectorType& m) { - typedef typename VectorType::Index Index; typedef typename VectorType::Scalar Scalar; Index size = m.size(); @@ -51,7 +50,6 @@ template<typename VectorType> void map_class_vector(const VectorType& m) template<typename MatrixType> void map_class_matrix(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(), cols = m.cols(), size = rows*cols; @@ -64,8 +62,9 @@ template<typename MatrixType> void map_class_matrix(const MatrixType& m) for(int i = 0; i < size; i++) array2[i] = Scalar(1); // array3unaligned -> unaligned pointer to heap Scalar* array3 = new Scalar[size+1]; - for(int i = 0; i < size+1; i++) array3[i] = Scalar(1); - Scalar* array3unaligned = internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES == 0 ? array3+1 : array3; + Index sizep1 = size + 1; // <- without this temporary MSVC 2103 generates bad code + for(Index i = 0; i < sizep1; i++) array3[i] = Scalar(1); + Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? 
array3+1 : array3; Scalar array4[256]; if(size<=256) for(int i = 0; i < size; i++) array4[i] = Scalar(1); @@ -121,7 +120,6 @@ template<typename MatrixType> void map_class_matrix(const MatrixType& m) template<typename VectorType> void map_static_methods(const VectorType& m) { - typedef typename VectorType::Index Index; typedef typename VectorType::Scalar Scalar; Index size = m.size(); @@ -163,7 +161,6 @@ template<typename Scalar> void map_not_aligned_on_scalar() { typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType; - typedef typename MatrixType::Index Index; Index size = 11; Scalar* array1 = internal::aligned_new<Scalar>((size+1)*(size+1)+1); Scalar* array2 = reinterpret_cast<Scalar*>(sizeof(Scalar)/2+std::size_t(array1)); @@ -181,7 +178,7 @@ void map_not_aligned_on_scalar() internal::aligned_delete(array1, (size+1)*(size+1)+1); } -void test_mapped_matrix() +EIGEN_DECLARE_TEST(mapped_matrix) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( map_class_vector(Matrix<float, 1, 1>()) ); @@ -205,7 +202,6 @@ void test_mapped_matrix() CALL_SUBTEST_8( map_static_methods(RowVector3d()) ); CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) ); CALL_SUBTEST_10( map_static_methods(VectorXf(12)) ); - CALL_SUBTEST_11( map_not_aligned_on_scalar<double>() ); } } diff --git a/test/mapstaticmethods.cpp b/test/mapstaticmethods.cpp index 06272d106..d0128ba94 100644 --- a/test/mapstaticmethods.cpp +++ b/test/mapstaticmethods.cpp @@ -9,8 +9,12 @@ #include "main.h" +// GCC<=4.8 has spurious shadow warnings, because `ptr` re-appears inside template instantiations +// workaround: put these in an anonymous namespace +namespace { float *ptr; const float *const_ptr; +} template<typename PlainObjectType, bool IsDynamicSize = PlainObjectType::SizeAtCompileTime == Dynamic, @@ -69,7 +73,6 @@ struct mapstaticmethods_impl<PlainObjectType, true, false> { static void run(const PlainObjectType& m) { - typedef typename PlainObjectType::Index Index; Index rows = m.rows(), cols = m.cols(); int i = 
internal::random<int>(2,5), j = internal::random<int>(2,5); @@ -116,7 +119,6 @@ struct mapstaticmethods_impl<PlainObjectType, true, true> { static void run(const PlainObjectType& v) { - typedef typename PlainObjectType::Index Index; Index size = v.size(); int i = internal::random<int>(2,5); @@ -145,7 +147,7 @@ void mapstaticmethods(const PlainObjectType& m) VERIFY(true); // just to avoid 'unused function' warning } -void test_mapstaticmethods() +EIGEN_DECLARE_TEST(mapstaticmethods) { ptr = internal::aligned_new<float>(1000); for(int i = 0; i < 1000; i++) ptr[i] = float(i); diff --git a/test/mapstride.cpp b/test/mapstride.cpp index 4858f8fea..fde73f2ec 100644 --- a/test/mapstride.cpp +++ b/test/mapstride.cpp @@ -11,7 +11,6 @@ template<int Alignment,typename VectorType> void map_class_vector(const VectorType& m) { - typedef typename VectorType::Index Index; typedef typename VectorType::Scalar Scalar; Index size = m.size(); @@ -50,7 +49,6 @@ template<int Alignment,typename VectorType> void map_class_vector(const VectorTy template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixType& _m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = _m.rows(), cols = _m.cols(); @@ -58,7 +56,7 @@ template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixTy MatrixType m = MatrixType::Random(rows,cols); Scalar s1 = internal::random<Scalar>(); - Index arraysize = 2*(rows+4)*(cols+4); + Index arraysize = 4*(rows+4)*(cols+4); Scalar* a_array1 = internal::aligned_new<Scalar>(arraysize+1); Scalar* array1 = a_array1; @@ -143,13 +141,92 @@ template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixTy VERIFY_IS_APPROX(map,s1*m); } + // test inner stride and no outer stride + for(int k=0; k<2; ++k) + { + if(k==1 && (m.innerSize()*2)*m.outerSize() > maxsize2) + break; + Scalar* array = (k==0 ? 
array1 : array2); + + Map<MatrixType, Alignment, InnerStride<Dynamic> > map(array, rows, cols, InnerStride<Dynamic>(2)); + map = m; + VERIFY(map.outerStride() == map.innerSize()*2); + for(int i = 0; i < m.outerSize(); ++i) + for(int j = 0; j < m.innerSize(); ++j) + { + VERIFY(array[map.innerSize()*i*2+j*2] == m.coeffByOuterInner(i,j)); + VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); + } + VERIFY_IS_APPROX(s1*map,s1*m); + map *= s1; + VERIFY_IS_APPROX(map,s1*m); + } + + // test negative strides + { + Matrix<Scalar,Dynamic,1>::Map(a_array1, arraysize+1).setRandom(); + Index outerstride = m.innerSize()+4; + Scalar* array = array1; + + { + Map<MatrixType, Alignment, OuterStride<> > map1(array, rows, cols, OuterStride<>( outerstride)); + Map<MatrixType, Unaligned, OuterStride<> > map2(array+(m.outerSize()-1)*outerstride, rows, cols, OuterStride<>(-outerstride)); + if(MatrixType::IsRowMajor) VERIFY_IS_APPROX(map1.colwise().reverse(), map2); + else VERIFY_IS_APPROX(map1.rowwise().reverse(), map2); + } + + { + Map<MatrixType, Alignment, OuterStride<> > map1(array, rows, cols, OuterStride<>( outerstride)); + Map<MatrixType, Unaligned, Stride<Dynamic,Dynamic> > map2(array+(m.outerSize()-1)*outerstride+m.innerSize()-1, rows, cols, Stride<Dynamic,Dynamic>(-outerstride,-1)); + VERIFY_IS_APPROX(map1.reverse(), map2); + } + + { + Map<MatrixType, Alignment, OuterStride<> > map1(array, rows, cols, OuterStride<>( outerstride)); + Map<MatrixType, Unaligned, Stride<Dynamic,-1> > map2(array+(m.outerSize()-1)*outerstride+m.innerSize()-1, rows, cols, Stride<Dynamic,-1>(-outerstride,-1)); + VERIFY_IS_APPROX(map1.reverse(), map2); + } + } + internal::aligned_delete(a_array1, arraysize+1); } -void test_mapstride() +// Additional tests for inner-stride but no outer-stride +template<int> +void bug1453() +{ + const int data[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; + typedef 
Matrix<int,Dynamic,Dynamic,RowMajor> RowMatrixXi; + typedef Matrix<int,2,3,ColMajor> ColMatrix23i; + typedef Matrix<int,3,2,ColMajor> ColMatrix32i; + typedef Matrix<int,2,3,RowMajor> RowMatrix23i; + typedef Matrix<int,3,2,RowMajor> RowMatrix32i; + + VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); + + VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + + VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); + VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); +} + 
+EIGEN_DECLARE_TEST(mapstride) { for(int i = 0; i < g_repeat; i++) { - int maxn = 30; + int maxn = 3; CALL_SUBTEST_1( map_class_vector<Aligned>(Matrix<float, 1, 1>()) ); CALL_SUBTEST_1( map_class_vector<Unaligned>(Matrix<float, 1, 1>()) ); CALL_SUBTEST_2( map_class_vector<Aligned>(Vector4d()) ); @@ -175,6 +252,8 @@ void test_mapstride() CALL_SUBTEST_5( map_class_matrix<Unaligned>(MatrixXi(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) ); CALL_SUBTEST_6( map_class_matrix<Aligned>(MatrixXcd(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) ); CALL_SUBTEST_6( map_class_matrix<Unaligned>(MatrixXcd(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) ); + + CALL_SUBTEST_5( bug1453<0>() ); TEST_SET_BUT_UNUSED_VARIABLE(maxn); } diff --git a/test/meta.cpp b/test/meta.cpp index b8dea68e8..7a8b93c3d 100644 --- a/test/meta.cpp +++ b/test/meta.cpp @@ -15,14 +15,26 @@ bool check_is_convertible(const From&, const To&) return internal::is_convertible<From,To>::value; } -void test_meta() +struct FooReturnType { + typedef int ReturnType; +}; + +struct MyInterface { + virtual void func() = 0; + virtual ~MyInterface() {} +}; +struct MyImpl : public MyInterface { + void func() {} +}; + +EIGEN_DECLARE_TEST(meta) { VERIFY((internal::conditional<(3<4),internal::true_type, internal::false_type>::type::value)); VERIFY(( internal::is_same<float,float>::value)); VERIFY((!internal::is_same<float,double>::value)); VERIFY((!internal::is_same<float,float&>::value)); VERIFY((!internal::is_same<float,const float&>::value)); - + VERIFY(( internal::is_same<float,internal::remove_all<const float&>::type >::value)); VERIFY(( internal::is_same<float,internal::remove_all<const float*>::type >::value)); VERIFY(( internal::is_same<float,internal::remove_all<const float*&>::type >::value)); @@ -51,23 +63,40 @@ void test_meta() VERIFY(( internal::is_same< internal::add_const_on_value_type<const float* const>::type, const float* const>::value)); VERIFY(( internal::is_same< 
internal::add_const_on_value_type<float* const>::type, const float* const>::value)); - + VERIFY(( internal::is_same<float,internal::remove_reference<float&>::type >::value)); VERIFY(( internal::is_same<const float,internal::remove_reference<const float&>::type >::value)); VERIFY(( internal::is_same<float,internal::remove_pointer<float*>::type >::value)); VERIFY(( internal::is_same<const float,internal::remove_pointer<const float*>::type >::value)); VERIFY(( internal::is_same<float,internal::remove_pointer<float* const >::type >::value)); - - VERIFY(( internal::is_convertible<float,double>::value )); - VERIFY(( internal::is_convertible<int,double>::value )); - VERIFY(( internal::is_convertible<double,int>::value )); - VERIFY((!internal::is_convertible<std::complex<double>,double>::value )); - VERIFY(( internal::is_convertible<Array33f,Matrix3f>::value )); -// VERIFY((!internal::is_convertible<Matrix3f,Matrix3d>::value )); //does not work because the conversion is prevented by a static assertion - VERIFY((!internal::is_convertible<Array33f,int>::value )); - VERIFY((!internal::is_convertible<MatrixXf,float>::value )); + + + // is_convertible + STATIC_CHECK(( internal::is_convertible<float,double>::value )); + STATIC_CHECK(( internal::is_convertible<int,double>::value )); + STATIC_CHECK(( internal::is_convertible<int, short>::value )); + STATIC_CHECK(( internal::is_convertible<short, int>::value )); + STATIC_CHECK(( internal::is_convertible<double,int>::value )); + STATIC_CHECK(( internal::is_convertible<double,std::complex<double> >::value )); + STATIC_CHECK((!internal::is_convertible<std::complex<double>,double>::value )); + STATIC_CHECK(( internal::is_convertible<Array33f,Matrix3f>::value )); + STATIC_CHECK(( internal::is_convertible<Matrix3f&,Matrix3f>::value )); + STATIC_CHECK(( internal::is_convertible<Matrix3f&,Matrix3f&>::value )); + STATIC_CHECK(( internal::is_convertible<Matrix3f&,const Matrix3f&>::value )); + STATIC_CHECK(( internal::is_convertible<const 
Matrix3f&,Matrix3f>::value )); + STATIC_CHECK(( internal::is_convertible<const Matrix3f&,const Matrix3f&>::value )); + STATIC_CHECK((!internal::is_convertible<const Matrix3f&,Matrix3f&>::value )); + STATIC_CHECK((!internal::is_convertible<const Matrix3f,Matrix3f&>::value )); + STATIC_CHECK(!( internal::is_convertible<Matrix3f,Matrix3f&>::value )); + + STATIC_CHECK(!( internal::is_convertible<int,int&>::value )); + STATIC_CHECK(( internal::is_convertible<const int,const int& >::value )); + + //STATIC_CHECK((!internal::is_convertible<Matrix3f,Matrix3d>::value )); //does not even compile because the conversion is prevented by a static assertion + STATIC_CHECK((!internal::is_convertible<Array33f,int>::value )); + STATIC_CHECK((!internal::is_convertible<MatrixXf,float>::value )); { - float f; + float f = 0.0f; MatrixXf A, B; VectorXf a, b; VERIFY(( check_is_convertible(a.dot(b), f) )); @@ -75,7 +104,39 @@ void test_meta() VERIFY((!check_is_convertible(A*B, f) )); VERIFY(( check_is_convertible(A*B, A) )); } - + + #if (EIGEN_COMP_GNUC && EIGEN_COMP_GNUC <= 99) \ + || (EIGEN_COMP_CLANG && EIGEN_COMP_CLANG <= 909) \ + || (EIGEN_COMP_MSVC && EIGEN_COMP_MSVC <=1914) + // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1752, + // basically, a fix in the c++ standard breaks our c++98 implementation + // of is_convertible for abstract classes. + // So the following tests are expected to fail with recent compilers. + + STATIC_CHECK(( !internal::is_convertible<MyInterface, MyImpl>::value )); + #if (!EIGEN_COMP_GNUC_STRICT) || (EIGEN_GNUC_AT_LEAST(4,8)) + // GCC prior to 4.8 fails to compile this test: + // error: cannot allocate an object of abstract type 'MyInterface' + // In other word, it does not obey SFINAE. + // Nevertheless, we don't really care about supporting abstract type as scalar type! 
+ STATIC_CHECK(( !internal::is_convertible<MyImpl, MyInterface>::value )); + #endif + STATIC_CHECK(( internal::is_convertible<MyImpl, const MyInterface&>::value )); + + #endif + + { + int i = 0; + VERIFY(( check_is_convertible(fix<3>(), i) )); + VERIFY((!check_is_convertible(i, fix<DynamicIndex>()) )); + } + + + VERIFY(( internal::has_ReturnType<FooReturnType>::value )); + VERIFY(( internal::has_ReturnType<ScalarBinaryOpTraits<int,int> >::value )); + VERIFY(( !internal::has_ReturnType<MatrixXf>::value )); + VERIFY(( !internal::has_ReturnType<int>::value )); + VERIFY(internal::meta_sqrt<1>::ret == 1); #define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt<X>::ret == int(std::sqrt(double(X)))) VERIFY_META_SQRT(2); diff --git a/test/metis_support.cpp b/test/metis_support.cpp index d87c56a13..b490dacde 100644 --- a/test/metis_support.cpp +++ b/test/metis_support.cpp @@ -19,7 +19,7 @@ template<typename T> void test_metis_T() check_sparse_square_solving(sparselu_metis); } -void test_metis_support() +EIGEN_DECLARE_TEST(metis_support) { CALL_SUBTEST_1(test_metis_T<double>()); } diff --git a/test/miscmatrices.cpp b/test/miscmatrices.cpp index ef20dc749..e71712f33 100644 --- a/test/miscmatrices.cpp +++ b/test/miscmatrices.cpp @@ -14,7 +14,6 @@ template<typename MatrixType> void miscMatrices(const MatrixType& m) /* this test covers the following files: DiagonalMatrix.h Ones.h */ - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; Index rows = m.rows(); @@ -35,7 +34,7 @@ template<typename MatrixType> void miscMatrices(const MatrixType& m) VERIFY_IS_APPROX(square, MatrixType::Identity(rows, rows)); } -void test_miscmatrices() +EIGEN_DECLARE_TEST(miscmatrices) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( miscMatrices(Matrix<float, 1, 1>()) ); diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp index ad9c2c652..d450dbff8 100644 --- a/test/mixingtypes.cpp +++ 
b/test/mixingtypes.cpp @@ -8,13 +8,27 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. -// work around "uninitialized" warnings and give that option some testing -#define EIGEN_INITIALIZE_MATRICES_BY_ZERO +#if defined(EIGEN_TEST_PART_7) #ifndef EIGEN_NO_STATIC_ASSERT #define EIGEN_NO_STATIC_ASSERT // turn static asserts into runtime asserts in order to check them #endif +// ignore double-promotion diagnostic for clang and gcc, if we check for static assertion anyway: +// TODO do the same for MSVC? +#if defined(__clang__) +# if (__clang_major__ * 100 + __clang_minor__) >= 308 +# pragma clang diagnostic ignored "-Wdouble-promotion" +# endif +#elif defined(__GNUC__) + // TODO is there a minimal GCC version for this? At least g++-4.7 seems to be fine with this. +# pragma GCC diagnostic ignored "-Wdouble-promotion" +#endif + +#endif + + + #if defined(EIGEN_TEST_PART_1) || defined(EIGEN_TEST_PART_2) || defined(EIGEN_TEST_PART_3) #ifndef EIGEN_DONT_VECTORIZE @@ -35,6 +49,28 @@ using namespace std; VERIFY_IS_APPROX(XPR,REF); \ VERIFY( g_called && #XPR" not properly optimized"); +template<int SizeAtCompileType> +void raise_assertion(Index size = SizeAtCompileType) +{ + // VERIFY_RAISES_ASSERT(mf+md); // does not even compile + Matrix<float, SizeAtCompileType, 1> vf; vf.setRandom(size); + Matrix<double, SizeAtCompileType, 1> vd; vd.setRandom(size); + VERIFY_RAISES_ASSERT(vf=vd); + VERIFY_RAISES_ASSERT(vf+=vd); + VERIFY_RAISES_ASSERT(vf-=vd); + VERIFY_RAISES_ASSERT(vd=vf); + VERIFY_RAISES_ASSERT(vd+=vf); + VERIFY_RAISES_ASSERT(vd-=vf); + + // vd.asDiagonal() * mf; // does not even compile + // vcd.asDiagonal() * mf; // does not even compile + +#if 0 // we get other compilation errors here than just static asserts + VERIFY_RAISES_ASSERT(vd.dot(vf)); +#endif +} + + template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType) { typedef std::complex<float> CF; @@ -69,17 
+105,10 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType) double epsd = std::sqrt(std::numeric_limits<double>::min EIGEN_EMPTY ()); while(std::abs(sf )<epsf) sf = internal::random<float>(); - while(std::abs(sd )<epsd) sf = internal::random<double>(); + while(std::abs(sd )<epsd) sd = internal::random<double>(); while(std::abs(scf)<epsf) scf = internal::random<CF>(); while(std::abs(scd)<epsd) scd = internal::random<CD>(); -// VERIFY_RAISES_ASSERT(mf+md); // does not even compile - -#ifdef EIGEN_DONT_VECTORIZE - VERIFY_RAISES_ASSERT(vf=vd); - VERIFY_RAISES_ASSERT(vf+=vd); -#endif - // check scalar products VERIFY_MIX_SCALAR(vcf * sf , vcf * complex<float>(sf)); VERIFY_MIX_SCALAR(sd * vcd , complex<double>(sd) * vcd); @@ -119,9 +148,6 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType) // check dot product vf.dot(vf); -#if 0 // we get other compilation errors here than just static asserts - VERIFY_RAISES_ASSERT(vd.dot(vf)); -#endif VERIFY_IS_APPROX(vcf.dot(vf), vcf.dot(vf.template cast<complex<float> >())); // check diagonal product @@ -130,9 +156,6 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType) VERIFY_IS_APPROX(mcf * vf.asDiagonal(), mcf * vf.template cast<complex<float> >().asDiagonal()); VERIFY_IS_APPROX(md * vcd.asDiagonal(), md.template cast<complex<double> >() * vcd.asDiagonal()); -// vd.asDiagonal() * mf; // does not even compile -// vcd.asDiagonal() * mf; // does not even compile - // check inner product VERIFY_IS_APPROX((vf.transpose() * vcf).value(), (vf.template cast<complex<float> >().transpose() * vcf).value()); @@ -286,8 +309,9 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType) VERIFY_IS_APPROX( rcd.noalias() -= mcd + md*md, - ((md*md).eval().template cast<CD>()) ); } -void test_mixingtypes() +EIGEN_DECLARE_TEST(mixingtypes) { + g_called = false; // Silence -Wunneeded-internal-declaration. 
for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(mixingtypes<3>()); CALL_SUBTEST_2(mixingtypes<4>()); @@ -296,5 +320,10 @@ void test_mixingtypes() CALL_SUBTEST_4(mixingtypes<3>()); CALL_SUBTEST_5(mixingtypes<4>()); CALL_SUBTEST_6(mixingtypes<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))); + CALL_SUBTEST_7(raise_assertion<Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))); } + CALL_SUBTEST_7(raise_assertion<0>()); + CALL_SUBTEST_7(raise_assertion<3>()); + CALL_SUBTEST_7(raise_assertion<4>()); + CALL_SUBTEST_7(raise_assertion<Dynamic>(0)); } diff --git a/test/mpl2only.cpp b/test/mpl2only.cpp index 7d04d6bba..296350d08 100644 --- a/test/mpl2only.cpp +++ b/test/mpl2only.cpp @@ -7,7 +7,9 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +#ifndef EIGEN_MPL2_ONLY #define EIGEN_MPL2_ONLY +#endif #include <Eigen/Dense> #include <Eigen/SparseCore> #include <Eigen/SparseLU> diff --git a/test/nestbyvalue.cpp b/test/nestbyvalue.cpp new file mode 100644 index 000000000..c5356bc24 --- /dev/null +++ b/test/nestbyvalue.cpp @@ -0,0 +1,37 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define TEST_ENABLE_TEMPORARY_TRACKING + +#include "main.h" + +typedef NestByValue<MatrixXd> CpyMatrixXd; +typedef CwiseBinaryOp<internal::scalar_sum_op<double,double>,const CpyMatrixXd,const CpyMatrixXd> XprType; + +XprType get_xpr_with_temps(const MatrixXd& a) +{ + MatrixXd t1 = a.rowwise().reverse(); + MatrixXd t2 = a+a; + return t1.nestByValue() + t2.nestByValue(); +} + +EIGEN_DECLARE_TEST(nestbyvalue) +{ + for(int i = 0; i < g_repeat; i++) { + Index rows = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + Index cols = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE); + MatrixXd a = MatrixXd(rows,cols); + nb_temporaries = 0; + XprType x = get_xpr_with_temps(a); + VERIFY_IS_EQUAL(nb_temporaries,6); + MatrixXd b = x; + VERIFY_IS_EQUAL(nb_temporaries,6+1); + VERIFY_IS_APPROX(b, a.rowwise().reverse().eval() + (a+a).eval()); + } +} diff --git a/test/nesting_ops.cpp b/test/nesting_ops.cpp index a419b0e44..4b5fc21f2 100644 --- a/test/nesting_ops.cpp +++ b/test/nesting_ops.cpp @@ -91,7 +91,7 @@ template <typename MatrixType> void run_nesting_ops_2(const MatrixType& _m) } -void test_nesting_ops() +EIGEN_DECLARE_TEST(nesting_ops) { CALL_SUBTEST_1(run_nesting_ops_1(MatrixXf::Random(25,25))); CALL_SUBTEST_2(run_nesting_ops_1(MatrixXcd::Random(25,25))); diff --git a/test/nomalloc.cpp b/test/nomalloc.cpp index 50756c2fb..cb4c073e9 100644 --- a/test/nomalloc.cpp +++ b/test/nomalloc.cpp @@ -24,7 +24,6 @@ template<typename MatrixType> void nomalloc(const MatrixType& m) { /* this test check no dynamic memory allocation are issued with fixed-size matrices */ - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); @@ -173,7 +172,7 @@ template<typename MatrixType> void test_reference(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; enum { Flag = MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor}; enum { TransposeFlag = !MatrixType::IsRowMajor ? 
Eigen::RowMajor : Eigen::ColMajor}; - typename MatrixType::Index rows = m.rows(), cols=m.cols(); + Index rows = m.rows(), cols=m.cols(); typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flag > MatrixX; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, TransposeFlag> MatrixXT; // Dynamic reference: @@ -203,7 +202,7 @@ template<typename MatrixType> void test_reference(const MatrixType& m) { } -void test_nomalloc() +EIGEN_DECLARE_TEST(nomalloc) { // create some dynamic objects Eigen::MatrixXd M1 = MatrixXd::Random(3,3); diff --git a/test/nullary.cpp b/test/nullary.cpp index acd55506e..9b25ea4f3 100644 --- a/test/nullary.cpp +++ b/test/nullary.cpp @@ -70,7 +70,7 @@ void testVectorType(const VectorType& base) Scalar high = internal::random<Scalar>(-500,500); Scalar low = (size == 1 ? high : internal::random<Scalar>(-500,500)); - if (low>high) std::swap(low,high); + if (numext::real(low)>numext::real(high)) std::swap(low,high); // check low==high if(internal::random<float>(0.f,1.f)<0.05f) @@ -79,7 +79,7 @@ void testVectorType(const VectorType& base) else if(size>2 && std::numeric_limits<RealScalar>::max_exponent10>0 && internal::random<float>(0.f,1.f)<0.1f) low = -internal::random<Scalar>(1,2) * RealScalar(std::pow(RealScalar(10),std::numeric_limits<RealScalar>::max_exponent10/2)); - const Scalar step = ((size == 1) ? 1 : (high-low)/(size-1)); + const Scalar step = ((size == 1) ? 
1 : (high-low)/RealScalar(size-1)); // check whether the result yields what we expect it to do VectorType m(base); @@ -89,21 +89,22 @@ void testVectorType(const VectorType& base) { VectorType n(size); for (int i=0; i<size; ++i) - n(i) = low+i*step; + n(i) = low+RealScalar(i)*step; VERIFY_IS_APPROX(m,n); CALL_SUBTEST( check_extremity_accuracy(m, low, high) ); } - if((!NumTraits<Scalar>::IsInteger) || ((high-low)>=size && (Index(high-low)%(size-1))==0) || (Index(high-low+1)<size && (size%Index(high-low+1))==0)) + RealScalar range_length = numext::real(high-low); + if((!NumTraits<Scalar>::IsInteger) || (range_length>=size && (Index(range_length)%(size-1))==0) || (Index(range_length+1)<size && (size%Index(range_length+1))==0)) { VectorType n(size); - if((!NumTraits<Scalar>::IsInteger) || (high-low>=size)) + if((!NumTraits<Scalar>::IsInteger) || (range_length>=size)) for (int i=0; i<size; ++i) - n(i) = size==1 ? low : (low + ((high-low)*Scalar(i))/(size-1)); + n(i) = size==1 ? low : (low + ((high-low)*Scalar(i))/RealScalar(size-1)); else for (int i=0; i<size; ++i) - n(i) = size==1 ? low : low + Scalar((double(high-low+1)*double(i))/double(size)); + n(i) = size==1 ? 
low : low + Scalar((double(range_length+1)*double(i))/double(size)); VERIFY_IS_APPROX(m,n); // random access version @@ -116,12 +117,12 @@ void testVectorType(const VectorType& base) CALL_SUBTEST( check_extremity_accuracy(m, low, high) ); } - VERIFY( m(m.size()-1) <= high ); - VERIFY( (m.array() <= high).all() ); - VERIFY( (m.array() >= low).all() ); + VERIFY( numext::real(m(m.size()-1)) <= numext::real(high) ); + VERIFY( (m.array().real() <= numext::real(high)).all() ); + VERIFY( (m.array().real() >= numext::real(low)).all() ); - VERIFY( m(m.size()-1) >= low ); + VERIFY( numext::real(m(m.size()-1)) >= numext::real(low) ); if(size>=1) { VERIFY( internal::isApprox(m(0),low) ); @@ -135,7 +136,7 @@ void testVectorType(const VectorType& base) col_vector.setLinSpaced(size,low,high); // when using the extended precision (e.g., FPU) the relative error might exceed 1 bit // when computing the squared sum in isApprox, thus the 2x factor. - VERIFY( row_vector.isApprox(col_vector.transpose(), Scalar(2)*NumTraits<Scalar>::epsilon())); + VERIFY( row_vector.isApprox(col_vector.transpose(), RealScalar(2)*NumTraits<Scalar>::epsilon())); Matrix<Scalar,Dynamic,1> size_changer(size+50); size_changer.setLinSpaced(size,low,high); @@ -157,18 +158,18 @@ void testVectorType(const VectorType& base) { Index n0 = VectorType::SizeAtCompileTime==Dynamic ? 
0 : VectorType::SizeAtCompileTime; low = internal::random<Scalar>(); - m = VectorType::LinSpaced(n0,low,low-1); + m = VectorType::LinSpaced(n0,low,low-RealScalar(1)); VERIFY(m.size()==n0); if(VectorType::SizeAtCompileTime==Dynamic) { VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,0,Scalar(n0-1)).sum(),Scalar(0)); - VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,low,low-1).sum(),Scalar(0)); + VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,low,low-RealScalar(1)).sum(),Scalar(0)); } m.setLinSpaced(n0,0,Scalar(n0-1)); VERIFY(m.size()==n0); - m.setLinSpaced(n0,low,low-1); + m.setLinSpaced(n0,low,low-RealScalar(1)); VERIFY(m.size()==n0); // empty range only: @@ -178,19 +179,37 @@ void testVectorType(const VectorType& base) if(NumTraits<Scalar>::IsInteger) { - VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,Scalar(low+size-1)), VectorType::LinSpaced(size,Scalar(low+size-1),low).reverse() ); + VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar(size-1)), VectorType::LinSpaced(size,low+Scalar(size-1),low).reverse() ); if(VectorType::SizeAtCompileTime==Dynamic) { // Check negative multiplicator path: for(Index k=1; k<5; ++k) - VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,Scalar(low+(size-1)*k)), VectorType::LinSpaced(size,Scalar(low+(size-1)*k),low).reverse() ); + VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar((size-1)*k)), VectorType::LinSpaced(size,low+Scalar((size-1)*k),low).reverse() ); // Check negative divisor path: for(Index k=1; k<5; ++k) - VERIFY_IS_APPROX( VectorType::LinSpaced(size*k,low,Scalar(low+size-1)), VectorType::LinSpaced(size*k,Scalar(low+size-1),low).reverse() ); + VERIFY_IS_APPROX( VectorType::LinSpaced(size*k,low,low+Scalar(size-1)), VectorType::LinSpaced(size*k,low+Scalar(size-1),low).reverse() ); } } } + + // test setUnit() + if(m.size()>0) + { + for(Index k=0; k<10; ++k) + { + Index i = internal::random<Index>(0,m.size()-1); + m.setUnit(i); + VERIFY_IS_APPROX( m, VectorType::Unit(m.size(), i) ); + } + 
if(VectorType::SizeAtCompileTime==Dynamic) + { + Index i = internal::random<Index>(0,2*m.size()-1); + m.setUnit(2*m.size(),i); + VERIFY_IS_APPROX( m, VectorType::Unit(m.size(),i) ); + } + } + } template<typename MatrixType> @@ -221,45 +240,36 @@ void testMatrixType(const MatrixType& m) VERIFY_IS_APPROX( A(i,j), s1 ); } -void test_nullary() +template<int> +void bug79() { - CALL_SUBTEST_1( testMatrixType(Matrix2d()) ); - CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random<int>(1,300),internal::random<int>(1,300))) ); - CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) ); - - for(int i = 0; i < g_repeat*10; i++) { - CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,30000))) ); - CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232 - CALL_SUBTEST_6( testVectorType(Vector3d()) ); - CALL_SUBTEST_7( testVectorType(VectorXf(internal::random<int>(1,30000))) ); - CALL_SUBTEST_8( testVectorType(Vector3f()) ); - CALL_SUBTEST_8( testVectorType(Vector4f()) ); - CALL_SUBTEST_8( testVectorType(Matrix<float,8,1>()) ); - CALL_SUBTEST_8( testVectorType(Matrix<float,1,1>()) ); - - CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(1,10))) ); - CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(9,300))) ); - CALL_SUBTEST_9( testVectorType(Matrix<int,1,1>()) ); - } - -#ifdef EIGEN_TEST_PART_6 // Assignment of a RowVectorXd to a MatrixXd (regression test for bug #79). 
VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits<double>::epsilon() ); -#endif +} -#ifdef EIGEN_TEST_PART_9 +template<int> +void bug1630() +{ + Array4d x4 = Array4d::LinSpaced(0.0, 1.0); + Array3d x3(Array4d::LinSpaced(0.0, 1.0).head(3)); + VERIFY_IS_APPROX(x4.head(3), x3); +} + +template<int> +void nullary_overflow() +{ // Check possible overflow issue - { - int n = 60000; - ArrayXi a1(n), a2(n); - a1.setLinSpaced(n, 0, n-1); - for(int i=0; i<n; ++i) - a2(i) = i; - VERIFY_IS_APPROX(a1,a2); - } -#endif + int n = 60000; + ArrayXi a1(n), a2(n); + a1.setLinSpaced(n, 0, n-1); + for(int i=0; i<n; ++i) + a2(i) = i; + VERIFY_IS_APPROX(a1,a2); +} -#ifdef EIGEN_TEST_PART_10 +template<int> +void nullary_internal_logic() +{ // check some internal logic VERIFY(( internal::has_nullary_operator<internal::scalar_constant_op<double> >::value )); VERIFY(( !internal::has_unary_operator<internal::scalar_constant_op<double> >::value )); @@ -271,10 +281,10 @@ void test_nullary() VERIFY(( internal::has_binary_operator<internal::scalar_identity_op<double> >::value )); VERIFY(( !internal::functor_has_linear_access<internal::scalar_identity_op<double> >::ret )); - VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<float,float> >::value )); - VERIFY(( internal::has_unary_operator<internal::linspaced_op<float,float> >::value )); - VERIFY(( !internal::has_binary_operator<internal::linspaced_op<float,float> >::value )); - VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<float,float> >::ret )); + VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<float> >::value )); + VERIFY(( internal::has_unary_operator<internal::linspaced_op<float> >::value )); + VERIFY(( !internal::has_binary_operator<internal::linspaced_op<float> >::value )); + VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<float> >::ret )); // Regression unit test for a weird MSVC bug. 
// Search "nullary_wrapper_workaround_msvc" in CoreEvaluators.h for the details. @@ -295,10 +305,37 @@ void test_nullary() VERIFY(( !internal::has_binary_operator<internal::scalar_constant_op<float> >::value )); VERIFY(( internal::functor_has_linear_access<internal::scalar_constant_op<float> >::ret )); - VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<int,int> >::value )); - VERIFY(( internal::has_unary_operator<internal::linspaced_op<int,int> >::value )); - VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int,int> >::value )); - VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int,int> >::ret )); + VERIFY(( !internal::has_nullary_operator<internal::linspaced_op<int> >::value )); + VERIFY(( internal::has_unary_operator<internal::linspaced_op<int> >::value )); + VERIFY(( !internal::has_binary_operator<internal::linspaced_op<int> >::value )); + VERIFY(( internal::functor_has_linear_access<internal::linspaced_op<int> >::ret )); } -#endif +} + +EIGEN_DECLARE_TEST(nullary) +{ + CALL_SUBTEST_1( testMatrixType(Matrix2d()) ); + CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random<int>(1,300),internal::random<int>(1,300))) ); + CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) ); + + for(int i = 0; i < g_repeat*10; i++) { + CALL_SUBTEST_3( testVectorType(VectorXcd(internal::random<int>(1,30000))) ); + CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,30000))) ); + CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232 + CALL_SUBTEST_6( testVectorType(Vector3d()) ); + CALL_SUBTEST_7( testVectorType(VectorXf(internal::random<int>(1,30000))) ); + CALL_SUBTEST_8( testVectorType(Vector3f()) ); + CALL_SUBTEST_8( testVectorType(Vector4f()) ); + CALL_SUBTEST_8( testVectorType(Matrix<float,8,1>()) ); + CALL_SUBTEST_8( testVectorType(Matrix<float,1,1>()) ); + + CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(1,10))) ); + 
CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(9,300))) ); + CALL_SUBTEST_9( testVectorType(Matrix<int,1,1>()) ); + } + + CALL_SUBTEST_6( bug79<0>() ); + CALL_SUBTEST_6( bug1630<0>() ); + CALL_SUBTEST_9( nullary_overflow<0>() ); + CALL_SUBTEST_10( nullary_internal_logic<0>() ); } diff --git a/test/num_dimensions.cpp b/test/num_dimensions.cpp new file mode 100644 index 000000000..7ad7ef697 --- /dev/null +++ b/test/num_dimensions.cpp @@ -0,0 +1,90 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include <Eigen/SparseCore> + +template<int ExpectedDim,typename Xpr> +void check_dim(const Xpr& ) { + STATIC_CHECK( Xpr::NumDimensions == ExpectedDim ); +} + +#if EIGEN_HAS_CXX11 +template<template <typename,int,int> class Object> +void map_num_dimensions() +{ + typedef Object<double, 1, 1> ArrayScalarType; + typedef Object<double, 2, 1> ArrayVectorType; + typedef Object<double, 1, 2> TransposeArrayVectorType; + typedef Object<double, 2, 2> ArrayType; + typedef Object<double, Eigen::Dynamic, 1> DynamicArrayVectorType; + typedef Object<double, 1, Eigen::Dynamic> DynamicTransposeArrayVectorType; + typedef Object<double, Eigen::Dynamic, Eigen::Dynamic> DynamicArrayType; + + STATIC_CHECK(ArrayScalarType::NumDimensions == 0); + STATIC_CHECK(ArrayVectorType::NumDimensions == 1); + STATIC_CHECK(TransposeArrayVectorType::NumDimensions == 1); + STATIC_CHECK(ArrayType::NumDimensions == 2); + STATIC_CHECK(DynamicArrayVectorType::NumDimensions == 1); + STATIC_CHECK(DynamicTransposeArrayVectorType::NumDimensions == 1); + STATIC_CHECK(DynamicArrayType::NumDimensions == 2); + + typedef Eigen::Map<ArrayScalarType> ArrayScalarMap; + 
typedef Eigen::Map<ArrayVectorType> ArrayVectorMap; + typedef Eigen::Map<TransposeArrayVectorType> TransposeArrayVectorMap; + typedef Eigen::Map<ArrayType> ArrayMap; + typedef Eigen::Map<DynamicArrayVectorType> DynamicArrayVectorMap; + typedef Eigen::Map<DynamicTransposeArrayVectorType> DynamicTransposeArrayVectorMap; + typedef Eigen::Map<DynamicArrayType> DynamicArrayMap; + + STATIC_CHECK(ArrayScalarMap::NumDimensions == 0); + STATIC_CHECK(ArrayVectorMap::NumDimensions == 1); + STATIC_CHECK(TransposeArrayVectorMap::NumDimensions == 1); + STATIC_CHECK(ArrayMap::NumDimensions == 2); + STATIC_CHECK(DynamicArrayVectorMap::NumDimensions == 1); + STATIC_CHECK(DynamicTransposeArrayVectorMap::NumDimensions == 1); + STATIC_CHECK(DynamicArrayMap::NumDimensions == 2); +} + +template<typename Scalar, int Rows, int Cols> +using TArray = Array<Scalar,Rows,Cols>; + +template<typename Scalar, int Rows, int Cols> +using TMatrix = Matrix<Scalar,Rows,Cols>; + +#endif + +EIGEN_DECLARE_TEST(num_dimensions) +{ + int n = 10; + ArrayXXd A(n,n); + CALL_SUBTEST( check_dim<2>(A) ); + CALL_SUBTEST( check_dim<2>(A.block(1,1,2,2)) ); + CALL_SUBTEST( check_dim<1>(A.col(1)) ); + CALL_SUBTEST( check_dim<1>(A.row(1)) ); + + MatrixXd M(n,n); + CALL_SUBTEST( check_dim<0>(M.row(1)*M.col(1)) ); + + SparseMatrix<double> S(n,n); + CALL_SUBTEST( check_dim<2>(S) ); + CALL_SUBTEST( check_dim<2>(S.block(1,1,2,2)) ); + CALL_SUBTEST( check_dim<1>(S.col(1)) ); + CALL_SUBTEST( check_dim<1>(S.row(1)) ); + + SparseVector<double> s(n); + CALL_SUBTEST( check_dim<1>(s) ); + CALL_SUBTEST( check_dim<1>(s.head(2)) ); + + + #if EIGEN_HAS_CXX11 + CALL_SUBTEST( map_num_dimensions<TArray>() ); + CALL_SUBTEST( map_num_dimensions<TMatrix>() ); + #endif +} diff --git a/test/numext.cpp b/test/numext.cpp index 3de33e2f9..8a2fde501 100644 --- a/test/numext.cpp +++ b/test/numext.cpp @@ -9,16 +9,44 @@ #include "main.h" +template<typename T, typename U> +bool check_if_equal_or_nans(const T& actual, const U& expected) { + return 
((actual == expected) || ((numext::isnan)(actual) && (numext::isnan)(expected))); +} + +template<typename T, typename U> +bool check_if_equal_or_nans(const std::complex<T>& actual, const std::complex<U>& expected) { + return check_if_equal_or_nans(numext::real(actual), numext::real(expected)) + && check_if_equal_or_nans(numext::imag(actual), numext::imag(expected)); +} + +template<typename T, typename U> +bool test_is_equal_or_nans(const T& actual, const U& expected) +{ + if (check_if_equal_or_nans(actual, expected)) { + return true; + } + + // false: + std::cerr + << "\n actual = " << actual + << "\n expected = " << expected << "\n\n"; + return false; +} + +#define VERIFY_IS_EQUAL_OR_NANS(a, b) VERIFY(test_is_equal_or_nans(a, b)) + template<typename T> void check_abs() { typedef typename NumTraits<T>::Real Real; + Real zero(0); if(NumTraits<T>::IsSigned) VERIFY_IS_EQUAL(numext::abs(-T(1)), T(1)); VERIFY_IS_EQUAL(numext::abs(T(0)), T(0)); VERIFY_IS_EQUAL(numext::abs(T(1)), T(1)); - for(int k=0; k<g_repeat*100; ++k) + for(int k=0; k<100; ++k) { T x = internal::random<T>(); if(!internal::is_same<T,bool>::value) @@ -26,28 +54,222 @@ void check_abs() { if(NumTraits<T>::IsSigned) { VERIFY_IS_EQUAL(numext::abs(x), numext::abs(-x)); - VERIFY( numext::abs(-x) >= Real(0)); + VERIFY( numext::abs(-x) >= zero ); } - VERIFY( numext::abs(x) >= Real(0)); + VERIFY( numext::abs(x) >= zero ); VERIFY_IS_APPROX( numext::abs2(x), numext::abs2(numext::abs(x)) ); } } -void test_numext() { - CALL_SUBTEST( check_abs<bool>() ); - CALL_SUBTEST( check_abs<signed char>() ); - CALL_SUBTEST( check_abs<unsigned char>() ); - CALL_SUBTEST( check_abs<short>() ); - CALL_SUBTEST( check_abs<unsigned short>() ); - CALL_SUBTEST( check_abs<int>() ); - CALL_SUBTEST( check_abs<unsigned int>() ); - CALL_SUBTEST( check_abs<long>() ); - CALL_SUBTEST( check_abs<unsigned long>() ); - CALL_SUBTEST( check_abs<half>() ); - CALL_SUBTEST( check_abs<float>() ); - CALL_SUBTEST( check_abs<double>() ); - CALL_SUBTEST( 
check_abs<long double>() ); - - CALL_SUBTEST( check_abs<std::complex<float> >() ); - CALL_SUBTEST( check_abs<std::complex<double> >() ); +template<typename T> +void check_arg() { + typedef typename NumTraits<T>::Real Real; + VERIFY_IS_EQUAL(numext::abs(T(0)), T(0)); + VERIFY_IS_EQUAL(numext::abs(T(1)), T(1)); + + for(int k=0; k<100; ++k) + { + T x = internal::random<T>(); + Real y = numext::arg(x); + VERIFY_IS_APPROX( y, std::arg(x) ); + } +} + +template<typename T> +struct check_sqrt_impl { + static void run() { + for (int i=0; i<1000; ++i) { + const T x = numext::abs(internal::random<T>()); + const T sqrtx = numext::sqrt(x); + VERIFY_IS_APPROX(sqrtx*sqrtx, x); + } + + // Corner cases. + const T zero = T(0); + const T one = T(1); + const T inf = std::numeric_limits<T>::infinity(); + const T nan = std::numeric_limits<T>::quiet_NaN(); + VERIFY_IS_EQUAL(numext::sqrt(zero), zero); + VERIFY_IS_EQUAL(numext::sqrt(inf), inf); + VERIFY((numext::isnan)(numext::sqrt(nan))); + VERIFY((numext::isnan)(numext::sqrt(-one))); + } +}; + +template<typename T> +struct check_sqrt_impl<std::complex<T> > { + static void run() { + typedef typename std::complex<T> ComplexT; + + for (int i=0; i<1000; ++i) { + const ComplexT x = internal::random<ComplexT>(); + const ComplexT sqrtx = numext::sqrt(x); + VERIFY_IS_APPROX(sqrtx*sqrtx, x); + } + + // Corner cases. 
+ const T zero = T(0); + const T one = T(1); + const T inf = std::numeric_limits<T>::infinity(); + const T nan = std::numeric_limits<T>::quiet_NaN(); + + // Set of corner cases from https://en.cppreference.com/w/cpp/numeric/complex/sqrt + const int kNumCorners = 20; + const ComplexT corners[kNumCorners][2] = { + {ComplexT(zero, zero), ComplexT(zero, zero)}, + {ComplexT(-zero, zero), ComplexT(zero, zero)}, + {ComplexT(zero, -zero), ComplexT(zero, zero)}, + {ComplexT(-zero, -zero), ComplexT(zero, zero)}, + {ComplexT(one, inf), ComplexT(inf, inf)}, + {ComplexT(nan, inf), ComplexT(inf, inf)}, + {ComplexT(one, -inf), ComplexT(inf, -inf)}, + {ComplexT(nan, -inf), ComplexT(inf, -inf)}, + {ComplexT(-inf, one), ComplexT(zero, inf)}, + {ComplexT(inf, one), ComplexT(inf, zero)}, + {ComplexT(-inf, -one), ComplexT(zero, -inf)}, + {ComplexT(inf, -one), ComplexT(inf, -zero)}, + {ComplexT(-inf, nan), ComplexT(nan, inf)}, + {ComplexT(inf, nan), ComplexT(inf, nan)}, + {ComplexT(zero, nan), ComplexT(nan, nan)}, + {ComplexT(one, nan), ComplexT(nan, nan)}, + {ComplexT(nan, zero), ComplexT(nan, nan)}, + {ComplexT(nan, one), ComplexT(nan, nan)}, + {ComplexT(nan, -one), ComplexT(nan, nan)}, + {ComplexT(nan, nan), ComplexT(nan, nan)}, + }; + + for (int i=0; i<kNumCorners; ++i) { + const ComplexT& x = corners[i][0]; + const ComplexT sqrtx = corners[i][1]; + VERIFY_IS_EQUAL_OR_NANS(numext::sqrt(x), sqrtx); + } + } +}; + +template<typename T> +void check_sqrt() { + check_sqrt_impl<T>::run(); +} + +template<typename T> +struct check_rsqrt_impl { + static void run() { + const T zero = T(0); + const T one = T(1); + const T inf = std::numeric_limits<T>::infinity(); + const T nan = std::numeric_limits<T>::quiet_NaN(); + + for (int i=0; i<1000; ++i) { + const T x = numext::abs(internal::random<T>()); + const T rsqrtx = numext::rsqrt(x); + const T invx = one / x; + VERIFY_IS_APPROX(rsqrtx*rsqrtx, invx); + } + + // Corner cases. 
+ VERIFY_IS_EQUAL(numext::rsqrt(zero), inf); + VERIFY_IS_EQUAL(numext::rsqrt(inf), zero); + VERIFY((numext::isnan)(numext::rsqrt(nan))); + VERIFY((numext::isnan)(numext::rsqrt(-one))); + } +}; + +template<typename T> +struct check_rsqrt_impl<std::complex<T> > { + static void run() { + typedef typename std::complex<T> ComplexT; + const T zero = T(0); + const T one = T(1); + const T inf = std::numeric_limits<T>::infinity(); + const T nan = std::numeric_limits<T>::quiet_NaN(); + + for (int i=0; i<1000; ++i) { + const ComplexT x = internal::random<ComplexT>(); + const ComplexT invx = ComplexT(one, zero) / x; + const ComplexT rsqrtx = numext::rsqrt(x); + VERIFY_IS_APPROX(rsqrtx*rsqrtx, invx); + } + + // GCC and MSVC differ in their treatment of 1/(0 + 0i) + // GCC/clang = (inf, nan) + // MSVC = (nan, nan) + // and 1 / (x + inf i) + // GCC/clang = (0, 0) + // MSVC = (nan, nan) + #if (EIGEN_COMP_GNUC) + { + const int kNumCorners = 20; + const ComplexT corners[kNumCorners][2] = { + // Only consistent across GCC, clang + {ComplexT(zero, zero), ComplexT(zero, zero)}, + {ComplexT(-zero, zero), ComplexT(zero, zero)}, + {ComplexT(zero, -zero), ComplexT(zero, zero)}, + {ComplexT(-zero, -zero), ComplexT(zero, zero)}, + {ComplexT(one, inf), ComplexT(inf, inf)}, + {ComplexT(nan, inf), ComplexT(inf, inf)}, + {ComplexT(one, -inf), ComplexT(inf, -inf)}, + {ComplexT(nan, -inf), ComplexT(inf, -inf)}, + // Consistent across GCC, clang, MSVC + {ComplexT(-inf, one), ComplexT(zero, inf)}, + {ComplexT(inf, one), ComplexT(inf, zero)}, + {ComplexT(-inf, -one), ComplexT(zero, -inf)}, + {ComplexT(inf, -one), ComplexT(inf, -zero)}, + {ComplexT(-inf, nan), ComplexT(nan, inf)}, + {ComplexT(inf, nan), ComplexT(inf, nan)}, + {ComplexT(zero, nan), ComplexT(nan, nan)}, + {ComplexT(one, nan), ComplexT(nan, nan)}, + {ComplexT(nan, zero), ComplexT(nan, nan)}, + {ComplexT(nan, one), ComplexT(nan, nan)}, + {ComplexT(nan, -one), ComplexT(nan, nan)}, + {ComplexT(nan, nan), ComplexT(nan, nan)}, + }; + + for 
(int i=0; i<kNumCorners; ++i) { + const ComplexT& x = corners[i][0]; + const ComplexT rsqrtx = ComplexT(one, zero) / corners[i][1]; + VERIFY_IS_EQUAL_OR_NANS(numext::rsqrt(x), rsqrtx); + } + } + #endif + } +}; + +template<typename T> +void check_rsqrt() { + check_rsqrt_impl<T>::run(); +} + +EIGEN_DECLARE_TEST(numext) { + for(int k=0; k<g_repeat; ++k) + { + CALL_SUBTEST( check_abs<bool>() ); + CALL_SUBTEST( check_abs<signed char>() ); + CALL_SUBTEST( check_abs<unsigned char>() ); + CALL_SUBTEST( check_abs<short>() ); + CALL_SUBTEST( check_abs<unsigned short>() ); + CALL_SUBTEST( check_abs<int>() ); + CALL_SUBTEST( check_abs<unsigned int>() ); + CALL_SUBTEST( check_abs<long>() ); + CALL_SUBTEST( check_abs<unsigned long>() ); + CALL_SUBTEST( check_abs<half>() ); + CALL_SUBTEST( check_abs<bfloat16>() ); + CALL_SUBTEST( check_abs<float>() ); + CALL_SUBTEST( check_abs<double>() ); + CALL_SUBTEST( check_abs<long double>() ); + CALL_SUBTEST( check_abs<std::complex<float> >() ); + CALL_SUBTEST( check_abs<std::complex<double> >() ); + + CALL_SUBTEST( check_arg<std::complex<float> >() ); + CALL_SUBTEST( check_arg<std::complex<double> >() ); + + CALL_SUBTEST( check_sqrt<float>() ); + CALL_SUBTEST( check_sqrt<double>() ); + CALL_SUBTEST( check_sqrt<std::complex<float> >() ); + CALL_SUBTEST( check_sqrt<std::complex<double> >() ); + + CALL_SUBTEST( check_rsqrt<float>() ); + CALL_SUBTEST( check_rsqrt<double>() ); + CALL_SUBTEST( check_rsqrt<std::complex<float> >() ); + CALL_SUBTEST( check_rsqrt<std::complex<double> >() ); + } } diff --git a/test/packetmath.cpp b/test/packetmath.cpp index 7821a1738..121ec7283 100644 --- a/test/packetmath.cpp +++ b/test/packetmath.cpp @@ -8,283 +8,562 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-#include "main.h" -#include "unsupported/Eigen/SpecialFunctions" +#include "packetmath_test_shared.h" +#include "random_without_cast_overflow.h" -#if defined __GNUC__ && __GNUC__>=6 - #pragma GCC diagnostic ignored "-Wignored-attributes" -#endif -// using namespace Eigen; - -#ifdef EIGEN_VECTORIZE_SSE -const bool g_vectorize_sse = true; -#else -const bool g_vectorize_sse = false; -#endif +template <typename T> +inline T REF_ADD(const T& a, const T& b) { + return a + b; +} +template <typename T> +inline T REF_SUB(const T& a, const T& b) { + return a - b; +} +template <typename T> +inline T REF_MUL(const T& a, const T& b) { + return a * b; +} +template <typename T> +inline T REF_DIV(const T& a, const T& b) { + return a / b; +} +template <typename T> +inline T REF_ABS_DIFF(const T& a, const T& b) { + return a > b ? a - b : b - a; +} -namespace Eigen { -namespace internal { -template<typename T> T negate(const T& x) { return -x; } +// Specializations for bool. +template <> +inline bool REF_ADD(const bool& a, const bool& b) { + return a || b; +} +template <> +inline bool REF_SUB(const bool& a, const bool& b) { + return a ^ b; } +template <> +inline bool REF_MUL(const bool& a, const bool& b) { + return a && b; } -// NOTE: we disbale inlining for this function to workaround a GCC issue when using -O3 and the i387 FPU. 
-template<typename Scalar> EIGEN_DONT_INLINE -bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue) -{ - return internal::isMuchSmallerThan(a-b, refvalue); +template <typename T> +inline T REF_FREXP(const T& x, T& exp) { + int iexp; + EIGEN_USING_STD(frexp) + const T out = static_cast<T>(frexp(x, &iexp)); + exp = static_cast<T>(iexp); + return out; } -template<typename Scalar> bool areApproxAbs(const Scalar* a, const Scalar* b, int size, const typename NumTraits<Scalar>::Real& refvalue) -{ - for (int i=0; i<size; ++i) - { - if (!isApproxAbs(a[i],b[i],refvalue)) - { - std::cout << "ref: [" << Map<const Matrix<Scalar,1,Dynamic> >(a,size) << "]" << " != vec: [" << Map<const Matrix<Scalar,1,Dynamic> >(b,size) << "]\n"; - return false; +template <typename T> +inline T REF_LDEXP(const T& x, const T& exp) { + EIGEN_USING_STD(ldexp) + return static_cast<T>(ldexp(x, static_cast<int>(exp))); +} + +// Uses pcast to cast from one array to another. +template <typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio> +struct pcast_array; + +template <typename SrcPacket, typename TgtPacket, int TgtCoeffRatio> +struct pcast_array<SrcPacket, TgtPacket, 1, TgtCoeffRatio> { + typedef typename internal::unpacket_traits<SrcPacket>::type SrcScalar; + typedef typename internal::unpacket_traits<TgtPacket>::type TgtScalar; + static void cast(const SrcScalar* src, size_t size, TgtScalar* dst) { + static const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size; + static const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size; + size_t i; + for (i = 0; i < size && i + SrcPacketSize <= size; i += TgtPacketSize) { + internal::pstoreu(dst + i, internal::pcast<SrcPacket, TgtPacket>(internal::ploadu<SrcPacket>(src + i))); + } + // Leftovers that cannot be loaded into a packet. 
+ for (; i < size; ++i) { + dst[i] = static_cast<TgtScalar>(src[i]); } } - return true; -} +}; -template<typename Scalar> bool areApprox(const Scalar* a, const Scalar* b, int size) -{ - for (int i=0; i<size; ++i) - { - if (a[i]!=b[i] && !internal::isApprox(a[i],b[i])) - { - std::cout << "ref: [" << Map<const Matrix<Scalar,1,Dynamic> >(a,size) << "]" << " != vec: [" << Map<const Matrix<Scalar,1,Dynamic> >(b,size) << "]\n"; - return false; +template <typename SrcPacket, typename TgtPacket> +struct pcast_array<SrcPacket, TgtPacket, 2, 1> { + static void cast(const typename internal::unpacket_traits<SrcPacket>::type* src, size_t size, + typename internal::unpacket_traits<TgtPacket>::type* dst) { + static const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size; + static const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size; + for (size_t i = 0; i < size; i += TgtPacketSize) { + SrcPacket a = internal::ploadu<SrcPacket>(src + i); + SrcPacket b = internal::ploadu<SrcPacket>(src + i + SrcPacketSize); + internal::pstoreu(dst + i, internal::pcast<SrcPacket, TgtPacket>(a, b)); } } - return true; -} +}; -#define CHECK_CWISE1(REFOP, POP) { \ - for (int i=0; i<PacketSize; ++i) \ - ref[i] = REFOP(data1[i]); \ - internal::pstore(data2, POP(internal::pload<Packet>(data1))); \ - VERIFY(areApprox(ref, data2, PacketSize) && #POP); \ -} +template <typename SrcPacket, typename TgtPacket> +struct pcast_array<SrcPacket, TgtPacket, 4, 1> { + static void cast(const typename internal::unpacket_traits<SrcPacket>::type* src, size_t size, + typename internal::unpacket_traits<TgtPacket>::type* dst) { + static const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size; + static const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size; + for (size_t i = 0; i < size; i += TgtPacketSize) { + SrcPacket a = internal::ploadu<SrcPacket>(src + i); + SrcPacket b = internal::ploadu<SrcPacket>(src + i + SrcPacketSize); + SrcPacket c = 
internal::ploadu<SrcPacket>(src + i + 2 * SrcPacketSize); + SrcPacket d = internal::ploadu<SrcPacket>(src + i + 3 * SrcPacketSize); + internal::pstoreu(dst + i, internal::pcast<SrcPacket, TgtPacket>(a, b, c, d)); + } + } +}; -template<bool Cond,typename Packet> -struct packet_helper -{ - template<typename T> - inline Packet load(const T* from) const { return internal::pload<Packet>(from); } +template <typename SrcPacket, typename TgtPacket> +struct pcast_array<SrcPacket, TgtPacket, 8, 1> { + static void cast(const typename internal::unpacket_traits<SrcPacket>::type* src, size_t size, + typename internal::unpacket_traits<TgtPacket>::type* dst) { + static const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size; + static const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size; + for (size_t i = 0; i < size; i += TgtPacketSize) { + SrcPacket a = internal::ploadu<SrcPacket>(src + i); + SrcPacket b = internal::ploadu<SrcPacket>(src + i + SrcPacketSize); + SrcPacket c = internal::ploadu<SrcPacket>(src + i + 2 * SrcPacketSize); + SrcPacket d = internal::ploadu<SrcPacket>(src + i + 3 * SrcPacketSize); + SrcPacket e = internal::ploadu<SrcPacket>(src + i + 4 * SrcPacketSize); + SrcPacket f = internal::ploadu<SrcPacket>(src + i + 5 * SrcPacketSize); + SrcPacket g = internal::ploadu<SrcPacket>(src + i + 6 * SrcPacketSize); + SrcPacket h = internal::ploadu<SrcPacket>(src + i + 7 * SrcPacketSize); + internal::pstoreu(dst + i, internal::pcast<SrcPacket, TgtPacket>(a, b, c, d, e, f, g, h)); + } + } +}; + +template <typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio, bool CanCast = false> +struct test_cast_helper; - template<typename T> - inline void store(T* to, const Packet& x) const { internal::pstore(to,x); } +template <typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio> +struct test_cast_helper<SrcPacket, TgtPacket, SrcCoeffRatio, TgtCoeffRatio, false> { + static void run() {} }; -template<typename 
Packet> -struct packet_helper<false,Packet> -{ - template<typename T> - inline T load(const T* from) const { return *from; } +template <typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio> +struct test_cast_helper<SrcPacket, TgtPacket, SrcCoeffRatio, TgtCoeffRatio, true> { + static void run() { + typedef typename internal::unpacket_traits<SrcPacket>::type SrcScalar; + typedef typename internal::unpacket_traits<TgtPacket>::type TgtScalar; + static const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size; + static const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size; + static const int BlockSize = SrcPacketSize * SrcCoeffRatio; + eigen_assert(BlockSize == TgtPacketSize * TgtCoeffRatio && "Packet sizes and cast ratios are mismatched."); + + static const int DataSize = 10 * BlockSize; + EIGEN_ALIGN_MAX SrcScalar data1[DataSize]; + EIGEN_ALIGN_MAX TgtScalar data2[DataSize]; + EIGEN_ALIGN_MAX TgtScalar ref[DataSize]; + + // Construct a packet of scalars that will not overflow when casting + for (int i = 0; i < DataSize; ++i) { + data1[i] = internal::random_without_cast_overflow<SrcScalar, TgtScalar>::value(); + } + + for (int i = 0; i < DataSize; ++i) { + ref[i] = static_cast<const TgtScalar>(data1[i]); + } - template<typename T> - inline void store(T* to, const T& x) const { *to = x; } + pcast_array<SrcPacket, TgtPacket, SrcCoeffRatio, TgtCoeffRatio>::cast(data1, DataSize, data2); + + VERIFY(test::areApprox(ref, data2, DataSize) && "internal::pcast<>"); + } }; -#define CHECK_CWISE1_IF(COND, REFOP, POP) if(COND) { \ - packet_helper<COND,Packet> h; \ - for (int i=0; i<PacketSize; ++i) \ - ref[i] = REFOP(data1[i]); \ - h.store(data2, POP(h.load(data1))); \ - VERIFY(areApprox(ref, data2, PacketSize) && #POP); \ +template <typename SrcPacket, typename TgtPacket> +struct test_cast { + static void run() { + typedef typename internal::unpacket_traits<SrcPacket>::type SrcScalar; + typedef typename 
internal::unpacket_traits<TgtPacket>::type TgtScalar; + typedef typename internal::type_casting_traits<SrcScalar, TgtScalar> TypeCastingTraits; + static const int SrcCoeffRatio = TypeCastingTraits::SrcCoeffRatio; + static const int TgtCoeffRatio = TypeCastingTraits::TgtCoeffRatio; + static const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size; + static const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size; + static const bool HasCast = + internal::unpacket_traits<SrcPacket>::vectorizable && internal::unpacket_traits<TgtPacket>::vectorizable && + TypeCastingTraits::VectorizedCast && (SrcPacketSize * SrcCoeffRatio == TgtPacketSize * TgtCoeffRatio); + test_cast_helper<SrcPacket, TgtPacket, SrcCoeffRatio, TgtCoeffRatio, HasCast>::run(); + } +}; + +template <typename SrcPacket, typename TgtScalar, + typename TgtPacket = typename internal::packet_traits<TgtScalar>::type, + bool Vectorized = internal::packet_traits<TgtScalar>::Vectorizable, + bool HasHalf = !internal::is_same<typename internal::unpacket_traits<TgtPacket>::half, TgtPacket>::value> +struct test_cast_runner; + +template <typename SrcPacket, typename TgtScalar, typename TgtPacket> +struct test_cast_runner<SrcPacket, TgtScalar, TgtPacket, true, false> { + static void run() { test_cast<SrcPacket, TgtPacket>::run(); } +}; + +template <typename SrcPacket, typename TgtScalar, typename TgtPacket> +struct test_cast_runner<SrcPacket, TgtScalar, TgtPacket, true, true> { + static void run() { + test_cast<SrcPacket, TgtPacket>::run(); + test_cast_runner<SrcPacket, TgtScalar, typename internal::unpacket_traits<TgtPacket>::half>::run(); + } +}; + +template <typename SrcPacket, typename TgtScalar, typename TgtPacket> +struct test_cast_runner<SrcPacket, TgtScalar, TgtPacket, false, false> { + static void run() {} +}; + +template <typename Scalar, typename Packet, typename EnableIf = void> +struct packetmath_pcast_ops_runner { + static void run() { + test_cast_runner<Packet, float>::run(); + 
test_cast_runner<Packet, double>::run(); + test_cast_runner<Packet, int8_t>::run(); + test_cast_runner<Packet, uint8_t>::run(); + test_cast_runner<Packet, int16_t>::run(); + test_cast_runner<Packet, uint16_t>::run(); + test_cast_runner<Packet, int32_t>::run(); + test_cast_runner<Packet, uint32_t>::run(); + test_cast_runner<Packet, int64_t>::run(); + test_cast_runner<Packet, uint64_t>::run(); + test_cast_runner<Packet, bool>::run(); + test_cast_runner<Packet, std::complex<float> >::run(); + test_cast_runner<Packet, std::complex<double> >::run(); + test_cast_runner<Packet, half>::run(); + test_cast_runner<Packet, bfloat16>::run(); + } +}; + +// Only some types support cast from std::complex<>. +template <typename Scalar, typename Packet> +struct packetmath_pcast_ops_runner<Scalar, Packet, typename internal::enable_if<NumTraits<Scalar>::IsComplex>::type> { + static void run() { + test_cast_runner<Packet, std::complex<float> >::run(); + test_cast_runner<Packet, std::complex<double> >::run(); + test_cast_runner<Packet, half>::run(); + test_cast_runner<Packet, bfloat16>::run(); + } +}; + +template <typename Scalar, typename Packet> +void packetmath_boolean_mask_ops() { + const int PacketSize = internal::unpacket_traits<Packet>::size; + const int size = 2 * PacketSize; + EIGEN_ALIGN_MAX Scalar data1[size]; + EIGEN_ALIGN_MAX Scalar data2[size]; + EIGEN_ALIGN_MAX Scalar ref[size]; + + for (int i = 0; i < size; ++i) { + data1[i] = internal::random<Scalar>(); + } + CHECK_CWISE1(internal::ptrue, internal::ptrue); + CHECK_CWISE2_IF(true, internal::pandnot, internal::pandnot); + for (int i = 0; i < PacketSize; ++i) { + data1[i] = Scalar(i); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + + CHECK_CWISE2_IF(true, internal::pcmp_eq, internal::pcmp_eq); + + //Test (-0) == (0) for signed operations + for (int i = 0; i < PacketSize; ++i) { + data1[i] = Scalar(-0.0); + data1[i + PacketSize] = internal::random<bool>() ? 
data1[i] : Scalar(0); + } + CHECK_CWISE2_IF(true, internal::pcmp_eq, internal::pcmp_eq); + + //Test NaN + for (int i = 0; i < PacketSize; ++i) { + data1[i] = NumTraits<Scalar>::quiet_NaN(); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + CHECK_CWISE2_IF(true, internal::pcmp_eq, internal::pcmp_eq); } -#define CHECK_CWISE2_IF(COND, REFOP, POP) if(COND) { \ - packet_helper<COND,Packet> h; \ - for (int i=0; i<PacketSize; ++i) \ - ref[i] = REFOP(data1[i], data1[i+PacketSize]); \ - h.store(data2, POP(h.load(data1),h.load(data1+PacketSize))); \ - VERIFY(areApprox(ref, data2, PacketSize) && #POP); \ +template <typename Scalar, typename Packet> +void packetmath_boolean_mask_ops_real() { + const int PacketSize = internal::unpacket_traits<Packet>::size; + const int size = 2 * PacketSize; + EIGEN_ALIGN_MAX Scalar data1[size]; + EIGEN_ALIGN_MAX Scalar data2[size]; + EIGEN_ALIGN_MAX Scalar ref[size]; + + for (int i = 0; i < PacketSize; ++i) { + data1[i] = internal::random<Scalar>(); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + + CHECK_CWISE2_IF(true, internal::pcmp_lt_or_nan, internal::pcmp_lt_or_nan); + + //Test (-0) <=/< (0) for signed operations + for (int i = 0; i < PacketSize; ++i) { + data1[i] = Scalar(-0.0); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + CHECK_CWISE2_IF(true, internal::pcmp_lt_or_nan, internal::pcmp_lt_or_nan); + + //Test NaN + for (int i = 0; i < PacketSize; ++i) { + data1[i] = NumTraits<Scalar>::quiet_NaN(); + data1[i + PacketSize] = internal::random<bool>() ? 
data1[i] : Scalar(0); + } + CHECK_CWISE2_IF(true, internal::pcmp_lt_or_nan, internal::pcmp_lt_or_nan); +} + +template <typename Scalar, typename Packet> +void packetmath_boolean_mask_ops_notcomplex() { + const int PacketSize = internal::unpacket_traits<Packet>::size; + const int size = 2 * PacketSize; + EIGEN_ALIGN_MAX Scalar data1[size]; + EIGEN_ALIGN_MAX Scalar data2[size]; + EIGEN_ALIGN_MAX Scalar ref[size]; + + for (int i = 0; i < PacketSize; ++i) { + data1[i] = internal::random<Scalar>(); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + + CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le); + CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt); + + //Test (-0) <=/< (0) for signed operations + for (int i = 0; i < PacketSize; ++i) { + data1[i] = Scalar(-0.0); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le); + CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt); + + //Test NaN + for (int i = 0; i < PacketSize; ++i) { + data1[i] = NumTraits<Scalar>::quiet_NaN(); + data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0); + } + CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le); + CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt); +} + +// Packet16b representing bool does not support ptrue, pandnot or pcmp_eq, since the scalar path +// (for some compilers) compute the bitwise and with 0x1 of the results to keep the value in [0,1]. 
+template<> +void packetmath_boolean_mask_ops<bool, internal::packet_traits<bool>::type>() {} +template<> +void packetmath_boolean_mask_ops_notcomplex<bool, internal::packet_traits<bool>::type>() {} + +template <typename Scalar, typename Packet> +void packetmath_minus_zero_add() { + const int PacketSize = internal::unpacket_traits<Packet>::size; + const int size = 2 * PacketSize; + EIGEN_ALIGN_MAX Scalar data1[size]; + EIGEN_ALIGN_MAX Scalar data2[size]; + EIGEN_ALIGN_MAX Scalar ref[size]; + + for (int i = 0; i < PacketSize; ++i) { + data1[i] = Scalar(-0.0); + data1[i + PacketSize] = Scalar(-0.0); + } + CHECK_CWISE2_IF(internal::packet_traits<Scalar>::HasAdd, REF_ADD, internal::padd); } -#define REF_ADD(a,b) ((a)+(b)) -#define REF_SUB(a,b) ((a)-(b)) -#define REF_MUL(a,b) ((a)*(b)) -#define REF_DIV(a,b) ((a)/(b)) +// Ensure optimization barrier compiles and doesn't modify contents. +// Only applies to raw types, so will not work for std::complex, Eigen::half +// or Eigen::bfloat16. For those you would need to refer to an underlying +// storage element. 
+template<typename Packet, typename EnableIf = void> +struct eigen_optimization_barrier_test { + static void run() {} +}; + +template<typename Packet> +struct eigen_optimization_barrier_test<Packet, typename internal::enable_if< + !NumTraits<Packet>::IsComplex && + !internal::is_same<Packet, Eigen::half>::value && + !internal::is_same<Packet, Eigen::bfloat16>::value + >::type> { + static void run() { + typedef typename internal::unpacket_traits<Packet>::type Scalar; + Scalar s = internal::random<Scalar>(); + Packet barrier = internal::pset1<Packet>(s); + EIGEN_OPTIMIZATION_BARRIER(barrier); + eigen_assert(s == internal::pfirst(barrier) && "EIGEN_OPTIMIZATION_BARRIER"); + } +}; -template<typename Scalar> void packetmath() -{ - using std::abs; +template <typename Scalar, typename Packet> +void packetmath() { typedef internal::packet_traits<Scalar> PacketTraits; - typedef typename PacketTraits::type Packet; - const int PacketSize = PacketTraits::size; + const int PacketSize = internal::unpacket_traits<Packet>::size; typedef typename NumTraits<Scalar>::Real RealScalar; + if (g_first_pass) + std::cerr << "=== Testing packet of type '" << typeid(Packet).name() << "' and scalar type '" + << typeid(Scalar).name() << "' and size '" << PacketSize << "' ===\n"; + const int max_size = PacketSize > 4 ? 
PacketSize : 4; - const int size = PacketSize*max_size; + const int size = PacketSize * max_size; EIGEN_ALIGN_MAX Scalar data1[size]; EIGEN_ALIGN_MAX Scalar data2[size]; - EIGEN_ALIGN_MAX Packet packets[PacketSize*2]; + EIGEN_ALIGN_MAX Scalar data3[size]; EIGEN_ALIGN_MAX Scalar ref[size]; - RealScalar refvalue = 0; - for (int i=0; i<size; ++i) - { - data1[i] = internal::random<Scalar>()/RealScalar(PacketSize); - data2[i] = internal::random<Scalar>()/RealScalar(PacketSize); - refvalue = (std::max)(refvalue,abs(data1[i])); + RealScalar refvalue = RealScalar(0); + + eigen_optimization_barrier_test<Packet>::run(); + eigen_optimization_barrier_test<Scalar>::run(); + + for (int i = 0; i < size; ++i) { + data1[i] = internal::random<Scalar>() / RealScalar(PacketSize); + data2[i] = internal::random<Scalar>() / RealScalar(PacketSize); + refvalue = (std::max)(refvalue, numext::abs(data1[i])); } internal::pstore(data2, internal::pload<Packet>(data1)); - VERIFY(areApprox(data1, data2, PacketSize) && "aligned load/store"); + VERIFY(test::areApprox(data1, data2, PacketSize) && "aligned load/store"); - for (int offset=0; offset<PacketSize; ++offset) - { - internal::pstore(data2, internal::ploadu<Packet>(data1+offset)); - VERIFY(areApprox(data1+offset, data2, PacketSize) && "internal::ploadu"); + for (int offset = 0; offset < PacketSize; ++offset) { + internal::pstore(data2, internal::ploadu<Packet>(data1 + offset)); + VERIFY(test::areApprox(data1 + offset, data2, PacketSize) && "internal::ploadu"); } - for (int offset=0; offset<PacketSize; ++offset) - { - internal::pstoreu(data2+offset, internal::pload<Packet>(data1)); - VERIFY(areApprox(data1, data2+offset, PacketSize) && "internal::pstoreu"); + for (int offset = 0; offset < PacketSize; ++offset) { + internal::pstoreu(data2 + offset, internal::pload<Packet>(data1)); + VERIFY(test::areApprox(data1, data2 + offset, PacketSize) && "internal::pstoreu"); } - for (int offset=0; offset<PacketSize; ++offset) - { - packets[0] = 
internal::pload<Packet>(data1); - packets[1] = internal::pload<Packet>(data1+PacketSize); - if (offset==0) internal::palign<0>(packets[0], packets[1]); - else if (offset==1) internal::palign<1>(packets[0], packets[1]); - else if (offset==2) internal::palign<2>(packets[0], packets[1]); - else if (offset==3) internal::palign<3>(packets[0], packets[1]); - else if (offset==4) internal::palign<4>(packets[0], packets[1]); - else if (offset==5) internal::palign<5>(packets[0], packets[1]); - else if (offset==6) internal::palign<6>(packets[0], packets[1]); - else if (offset==7) internal::palign<7>(packets[0], packets[1]); - else if (offset==8) internal::palign<8>(packets[0], packets[1]); - else if (offset==9) internal::palign<9>(packets[0], packets[1]); - else if (offset==10) internal::palign<10>(packets[0], packets[1]); - else if (offset==11) internal::palign<11>(packets[0], packets[1]); - else if (offset==12) internal::palign<12>(packets[0], packets[1]); - else if (offset==13) internal::palign<13>(packets[0], packets[1]); - else if (offset==14) internal::palign<14>(packets[0], packets[1]); - else if (offset==15) internal::palign<15>(packets[0], packets[1]); - internal::pstore(data2, packets[0]); - - for (int i=0; i<PacketSize; ++i) - ref[i] = data1[i+offset]; - - VERIFY(areApprox(ref, data2, PacketSize) && "internal::palign"); + if (internal::unpacket_traits<Packet>::masked_load_available) { + test::packet_helper<internal::unpacket_traits<Packet>::masked_load_available, Packet> h; + unsigned long long max_umask = (0x1ull << PacketSize); + + for (int offset = 0; offset < PacketSize; ++offset) { + for (unsigned long long umask = 0; umask < max_umask; ++umask) { + h.store(data2, h.load(data1 + offset, umask)); + for (int k = 0; k < PacketSize; ++k) data3[k] = ((umask & (0x1ull << k)) >> k) ? 
data1[k + offset] : Scalar(0); + VERIFY(test::areApprox(data3, data2, PacketSize) && "internal::ploadu masked"); + } + } + } + + if (internal::unpacket_traits<Packet>::masked_store_available) { + test::packet_helper<internal::unpacket_traits<Packet>::masked_store_available, Packet> h; + unsigned long long max_umask = (0x1ull << PacketSize); + + for (int offset = 0; offset < PacketSize; ++offset) { + for (unsigned long long umask = 0; umask < max_umask; ++umask) { + internal::pstore(data2, internal::pset1<Packet>(Scalar(0))); + h.store(data2, h.loadu(data1 + offset), umask); + for (int k = 0; k < PacketSize; ++k) data3[k] = ((umask & (0x1ull << k)) >> k) ? data1[k + offset] : Scalar(0); + VERIFY(test::areApprox(data3, data2, PacketSize) && "internal::pstoreu masked"); + } + } } VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasAdd); VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasSub); VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasMul); - VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasNegate); - VERIFY((internal::is_same<Scalar,int>::value) || (!PacketTraits::Vectorizable) || PacketTraits::HasDiv); - CHECK_CWISE2_IF(PacketTraits::HasAdd, REF_ADD, internal::padd); - CHECK_CWISE2_IF(PacketTraits::HasSub, REF_SUB, internal::psub); - CHECK_CWISE2_IF(PacketTraits::HasMul, REF_MUL, internal::pmul); + CHECK_CWISE2_IF(PacketTraits::HasAdd, REF_ADD, internal::padd); + CHECK_CWISE2_IF(PacketTraits::HasSub, REF_SUB, internal::psub); + CHECK_CWISE2_IF(PacketTraits::HasMul, REF_MUL, internal::pmul); CHECK_CWISE2_IF(PacketTraits::HasDiv, REF_DIV, internal::pdiv); - CHECK_CWISE1(internal::negate, internal::pnegate); + if (PacketTraits::HasNegate) CHECK_CWISE1(internal::negate, internal::pnegate); CHECK_CWISE1(numext::conj, internal::pconj); - for(int offset=0;offset<3;++offset) - { - for (int i=0; i<PacketSize; ++i) - ref[i] = data1[offset]; + for (int offset = 0; offset < 3; ++offset) { + for (int i = 0; i < PacketSize; ++i) ref[i] = 
data1[offset]; internal::pstore(data2, internal::pset1<Packet>(data1[offset])); - VERIFY(areApprox(ref, data2, PacketSize) && "internal::pset1"); + VERIFY(test::areApprox(ref, data2, PacketSize) && "internal::pset1"); } { - for (int i=0; i<PacketSize*4; ++i) - ref[i] = data1[i/PacketSize]; + for (int i = 0; i < PacketSize * 4; ++i) ref[i] = data1[i / PacketSize]; Packet A0, A1, A2, A3; internal::pbroadcast4<Packet>(data1, A0, A1, A2, A3); - internal::pstore(data2+0*PacketSize, A0); - internal::pstore(data2+1*PacketSize, A1); - internal::pstore(data2+2*PacketSize, A2); - internal::pstore(data2+3*PacketSize, A3); - VERIFY(areApprox(ref, data2, 4*PacketSize) && "internal::pbroadcast4"); + internal::pstore(data2 + 0 * PacketSize, A0); + internal::pstore(data2 + 1 * PacketSize, A1); + internal::pstore(data2 + 2 * PacketSize, A2); + internal::pstore(data2 + 3 * PacketSize, A3); + VERIFY(test::areApprox(ref, data2, 4 * PacketSize) && "internal::pbroadcast4"); } { - for (int i=0; i<PacketSize*2; ++i) - ref[i] = data1[i/PacketSize]; + for (int i = 0; i < PacketSize * 2; ++i) ref[i] = data1[i / PacketSize]; Packet A0, A1; internal::pbroadcast2<Packet>(data1, A0, A1); - internal::pstore(data2+0*PacketSize, A0); - internal::pstore(data2+1*PacketSize, A1); - VERIFY(areApprox(ref, data2, 2*PacketSize) && "internal::pbroadcast2"); + internal::pstore(data2 + 0 * PacketSize, A0); + internal::pstore(data2 + 1 * PacketSize, A1); + VERIFY(test::areApprox(ref, data2, 2 * PacketSize) && "internal::pbroadcast2"); } VERIFY(internal::isApprox(data1[0], internal::pfirst(internal::pload<Packet>(data1))) && "internal::pfirst"); - if(PacketSize>1) - { - for(int offset=0;offset<4;++offset) - { - for(int i=0;i<PacketSize/2;++i) - ref[2*i+0] = ref[2*i+1] = data1[offset+i]; - internal::pstore(data2,internal::ploaddup<Packet>(data1+offset)); - VERIFY(areApprox(ref, data2, PacketSize) && "ploaddup"); + if (PacketSize > 1) { + // apply different offsets to check that ploaddup is robust to unaligned 
inputs + for (int offset = 0; offset < 4; ++offset) { + for (int i = 0; i < PacketSize / 2; ++i) ref[2 * i + 0] = ref[2 * i + 1] = data1[offset + i]; + internal::pstore(data2, internal::ploaddup<Packet>(data1 + offset)); + VERIFY(test::areApprox(ref, data2, PacketSize) && "ploaddup"); } } - if(PacketSize>2) - { - for(int offset=0;offset<4;++offset) - { - for(int i=0;i<PacketSize/4;++i) - ref[4*i+0] = ref[4*i+1] = ref[4*i+2] = ref[4*i+3] = data1[offset+i]; - internal::pstore(data2,internal::ploadquad<Packet>(data1+offset)); - VERIFY(areApprox(ref, data2, PacketSize) && "ploadquad"); + if (PacketSize > 2) { + // apply different offsets to check that ploadquad is robust to unaligned inputs + for (int offset = 0; offset < 4; ++offset) { + for (int i = 0; i < PacketSize / 4; ++i) + ref[4 * i + 0] = ref[4 * i + 1] = ref[4 * i + 2] = ref[4 * i + 3] = data1[offset + i]; + internal::pstore(data2, internal::ploadquad<Packet>(data1 + offset)); + VERIFY(test::areApprox(ref, data2, PacketSize) && "ploadquad"); } } - ref[0] = 0; - for (int i=0; i<PacketSize; ++i) - ref[0] += data1[i]; - VERIFY(isApproxAbs(ref[0], internal::predux(internal::pload<Packet>(data1)), refvalue) && "internal::predux"); + ref[0] = Scalar(0); + for (int i = 0; i < PacketSize; ++i) ref[0] += data1[i]; + VERIFY(test::isApproxAbs(ref[0], internal::predux(internal::pload<Packet>(data1)), refvalue) && "internal::predux"); - { - for (int i=0; i<4; ++i) - ref[i] = 0; - for (int i=0; i<PacketSize; ++i) - ref[i%4] += data1[i]; - internal::pstore(data2, internal::predux_downto4(internal::pload<Packet>(data1))); - VERIFY(areApprox(ref, data2, PacketSize>4?PacketSize/2:PacketSize) && "internal::predux_downto4"); + if (!internal::is_same<Packet, typename internal::unpacket_traits<Packet>::half>::value) { + int HalfPacketSize = PacketSize > 4 ? 
PacketSize / 2 : PacketSize; + for (int i = 0; i < HalfPacketSize; ++i) ref[i] = Scalar(0); + for (int i = 0; i < PacketSize; ++i) ref[i % HalfPacketSize] += data1[i]; + internal::pstore(data2, internal::predux_half_dowto4(internal::pload<Packet>(data1))); + VERIFY(test::areApprox(ref, data2, HalfPacketSize) && "internal::predux_half_dowto4"); } - ref[0] = 1; - for (int i=0; i<PacketSize; ++i) - ref[0] *= data1[i]; + ref[0] = Scalar(1); + for (int i = 0; i < PacketSize; ++i) ref[0] = REF_MUL(ref[0], data1[i]); VERIFY(internal::isApprox(ref[0], internal::predux_mul(internal::pload<Packet>(data1))) && "internal::predux_mul"); - for (int j=0; j<PacketSize; ++j) - { - ref[j] = 0; - for (int i=0; i<PacketSize; ++i) - ref[j] += data1[i+j*PacketSize]; - packets[j] = internal::pload<Packet>(data1+j*PacketSize); - } - internal::pstore(data2, internal::preduxp(packets)); - VERIFY(areApproxAbs(ref, data2, PacketSize, refvalue) && "internal::preduxp"); - - for (int i=0; i<PacketSize; ++i) - ref[i] = data1[PacketSize-i-1]; + for (int i = 0; i < PacketSize; ++i) ref[i] = data1[PacketSize - i - 1]; internal::pstore(data2, internal::preverse(internal::pload<Packet>(data1))); - VERIFY(areApprox(ref, data2, PacketSize) && "internal::preverse"); + VERIFY(test::areApprox(ref, data2, PacketSize) && "internal::preverse"); internal::PacketBlock<Packet> kernel; - for (int i=0; i<PacketSize; ++i) { - kernel.packet[i] = internal::pload<Packet>(data1+i*PacketSize); + for (int i = 0; i < PacketSize; ++i) { + kernel.packet[i] = internal::pload<Packet>(data1 + i * PacketSize); } ptranspose(kernel); - for (int i=0; i<PacketSize; ++i) { + for (int i = 0; i < PacketSize; ++i) { internal::pstore(data2, kernel.packet[i]); for (int j = 0; j < PacketSize; ++j) { - VERIFY(isApproxAbs(data2[j], data1[i+j*PacketSize], refvalue) && "ptranspose"); + VERIFY(test::isApproxAbs(data2[j], data1[i + j * PacketSize], refvalue) && "ptranspose"); + } + } + + // GeneralBlockPanelKernel also checks 
PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize>; + if (PacketSize > 4 && PacketSize % 4 == 0) { + internal::PacketBlock<Packet, PacketSize%4==0?4:PacketSize> kernel2; + for (int i = 0; i < 4; ++i) { + kernel2.packet[i] = internal::pload<Packet>(data1 + i * PacketSize); + } + ptranspose(kernel2); + int data_counter = 0; + for (int i = 0; i < PacketSize; ++i) { + for (int j = 0; j < 4; ++j) { + data2[data_counter++] = data1[j*PacketSize + i]; + } + } + for (int i = 0; i < 4; ++i) { + internal::pstore(data3, kernel2.packet[i]); + for (int j = 0; j < PacketSize; ++j) { + VERIFY(test::isApproxAbs(data3[j], data2[i*PacketSize + j], refvalue) && "ptranspose"); + } } } @@ -300,342 +579,724 @@ template<typename Scalar> void packetmath() EIGEN_ALIGN_MAX Scalar result[size]; internal::pstore(result, blend); for (int i = 0; i < PacketSize; ++i) { - VERIFY(isApproxAbs(result[i], (selector.select[i] ? data1[i] : data2[i]), refvalue)); + VERIFY(test::isApproxAbs(result[i], (selector.select[i] ? data1[i] : data2[i]), refvalue)); } } - if (PacketTraits::HasBlend || g_vectorize_sse) { - // pinsertfirst - for (int i=0; i<PacketSize; ++i) - ref[i] = data1[i]; - Scalar s = internal::random<Scalar>(); - ref[0] = s; - internal::pstore(data2, internal::pinsertfirst(internal::pload<Packet>(data1),s)); - VERIFY(areApprox(ref, data2, PacketSize) && "internal::pinsertfirst"); + { + for (int i = 0; i < PacketSize; ++i) { + // "if" mask + unsigned char v = internal::random<bool>() ? 
0xff : 0; + char* bytes = (char*)(data1 + i); + for (int k = 0; k < int(sizeof(Scalar)); ++k) { + bytes[k] = v; + } + // "then" packet + data1[i + PacketSize] = internal::random<Scalar>(); + // "else" packet + data1[i + 2 * PacketSize] = internal::random<Scalar>(); + } + CHECK_CWISE3_IF(true, internal::pselect, internal::pselect); } - if (PacketTraits::HasBlend || g_vectorize_sse) { - // pinsertlast - for (int i=0; i<PacketSize; ++i) - ref[i] = data1[i]; - Scalar s = internal::random<Scalar>(); - ref[PacketSize-1] = s; - internal::pstore(data2, internal::pinsertlast(internal::pload<Packet>(data1),s)); - VERIFY(areApprox(ref, data2, PacketSize) && "internal::pinsertlast"); + for (int i = 0; i < size; ++i) { + data1[i] = internal::random<Scalar>(); + } + CHECK_CWISE1(internal::pzero, internal::pzero); + CHECK_CWISE2_IF(true, internal::por, internal::por); + CHECK_CWISE2_IF(true, internal::pxor, internal::pxor); + CHECK_CWISE2_IF(true, internal::pand, internal::pand); + + packetmath_boolean_mask_ops<Scalar, Packet>(); + packetmath_pcast_ops_runner<Scalar, Packet>::run(); + packetmath_minus_zero_add<Scalar, Packet>(); + + for (int i = 0; i < size; ++i) { + data1[i] = numext::abs(internal::random<Scalar>()); } + CHECK_CWISE1_IF(PacketTraits::HasSqrt, numext::sqrt, internal::psqrt); + CHECK_CWISE1_IF(PacketTraits::HasRsqrt, numext::rsqrt, internal::prsqrt); } -template<typename Scalar> void packetmath_real() -{ - using std::abs; +// Notice that this definition works for complex types as well. +// c++11 has std::log2 for real, but not for complex types. 
+template <typename Scalar> +Scalar log2(Scalar x) { + return Scalar(EIGEN_LOG2E) * std::log(x); +} + +template <typename Scalar, typename Packet> +void packetmath_real() { typedef internal::packet_traits<Scalar> PacketTraits; - typedef typename PacketTraits::type Packet; - const int PacketSize = PacketTraits::size; + const int PacketSize = internal::unpacket_traits<Packet>::size; - const int size = PacketSize*4; - EIGEN_ALIGN_MAX Scalar data1[PacketTraits::size*4]; - EIGEN_ALIGN_MAX Scalar data2[PacketTraits::size*4]; - EIGEN_ALIGN_MAX Scalar ref[PacketTraits::size*4]; + const int size = PacketSize * 4; + EIGEN_ALIGN_MAX Scalar data1[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar data2[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar ref[PacketSize * 4]; - for (int i=0; i<size; ++i) - { - data1[i] = internal::random<Scalar>(-1,1) * std::pow(Scalar(10), internal::random<Scalar>(-3,3)); - data2[i] = internal::random<Scalar>(-1,1) * std::pow(Scalar(10), internal::random<Scalar>(-3,3)); + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<double>(0, 1) * std::pow(10., internal::random<double>(-6, 6))); + data2[i] = Scalar(internal::random<double>(0, 1) * std::pow(10., internal::random<double>(-6, 6))); + } + + if (internal::random<float>(0, 1) < 0.1f) data1[internal::random<int>(0, PacketSize)] = Scalar(0); + + CHECK_CWISE1_IF(PacketTraits::HasLog, std::log, internal::plog); + CHECK_CWISE1_IF(PacketTraits::HasLog, log2, internal::plog2); + CHECK_CWISE1_IF(PacketTraits::HasRsqrt, numext::rsqrt, internal::prsqrt); + + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<double>(-1, 1) * std::pow(10., internal::random<double>(-3, 3))); + data2[i] = Scalar(internal::random<double>(-1, 1) * std::pow(10., internal::random<double>(-3, 3))); } CHECK_CWISE1_IF(PacketTraits::HasSin, std::sin, internal::psin); CHECK_CWISE1_IF(PacketTraits::HasCos, std::cos, internal::pcos); CHECK_CWISE1_IF(PacketTraits::HasTan, std::tan, internal::ptan); - 
CHECK_CWISE1_IF(PacketTraits::HasRound, numext::round, internal::pround); - CHECK_CWISE1_IF(PacketTraits::HasCeil, numext::ceil, internal::pceil); - CHECK_CWISE1_IF(PacketTraits::HasFloor, numext::floor, internal::pfloor); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasRound, numext::round, internal::pround); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasCeil, numext::ceil, internal::pceil); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasFloor, numext::floor, internal::pfloor); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasRint, numext::rint, internal::print); + + packetmath_boolean_mask_ops_real<Scalar,Packet>(); + + // Rounding edge cases. + if (PacketTraits::HasRound || PacketTraits::HasCeil || PacketTraits::HasFloor || PacketTraits::HasRint) { + typedef typename internal::make_integer<Scalar>::type IntType; + // Start with values that cannot fit inside an integer, work down to less than one. + Scalar val = numext::mini( + Scalar(2) * static_cast<Scalar>(NumTraits<IntType>::highest()), + NumTraits<Scalar>::highest()); + std::vector<Scalar> values; + while (val > Scalar(0.25)) { + // Cover both even and odd, positive and negative cases. + values.push_back(val); + values.push_back(val + Scalar(0.3)); + values.push_back(val + Scalar(0.5)); + values.push_back(val + Scalar(0.8)); + values.push_back(val + Scalar(1)); + values.push_back(val + Scalar(1.3)); + values.push_back(val + Scalar(1.5)); + values.push_back(val + Scalar(1.8)); + values.push_back(-val); + values.push_back(-val - Scalar(0.3)); + values.push_back(-val - Scalar(0.5)); + values.push_back(-val - Scalar(0.8)); + values.push_back(-val - Scalar(1)); + values.push_back(-val - Scalar(1.3)); + values.push_back(-val - Scalar(1.5)); + values.push_back(-val - Scalar(1.8)); + values.push_back(Scalar(-1.5) + val); // Bug 1785. 
+ val = val / Scalar(2); + } + values.push_back(NumTraits<Scalar>::infinity()); + values.push_back(-NumTraits<Scalar>::infinity()); + values.push_back(NumTraits<Scalar>::quiet_NaN()); + + for (size_t k=0; k<values.size(); ++k) { + data1[0] = values[k]; + CHECK_CWISE1_EXACT_IF(PacketTraits::HasRound, numext::round, internal::pround); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasCeil, numext::ceil, internal::pceil); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasFloor, numext::floor, internal::pfloor); + CHECK_CWISE1_EXACT_IF(PacketTraits::HasRint, numext::rint, internal::print); + } + } - for (int i=0; i<size; ++i) - { - data1[i] = internal::random<Scalar>(-1,1); - data2[i] = internal::random<Scalar>(-1,1); + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<double>(-1, 1)); + data2[i] = Scalar(internal::random<double>(-1, 1)); } CHECK_CWISE1_IF(PacketTraits::HasASin, std::asin, internal::pasin); CHECK_CWISE1_IF(PacketTraits::HasACos, std::acos, internal::pacos); - for (int i=0; i<size; ++i) - { - data1[i] = internal::random<Scalar>(-87,88); - data2[i] = internal::random<Scalar>(-87,88); + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<double>(-87, 88)); + data2[i] = Scalar(internal::random<double>(-87, 88)); } CHECK_CWISE1_IF(PacketTraits::HasExp, std::exp, internal::pexp); - for (int i=0; i<size; ++i) - { - data1[i] = internal::random<Scalar>(-1,1) * std::pow(Scalar(10), internal::random<Scalar>(-6,6)); - data2[i] = internal::random<Scalar>(-1,1) * std::pow(Scalar(10), internal::random<Scalar>(-6,6)); + + CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp); + if (PacketTraits::HasExp) { + // Check denormals: + for (int j=0; j<3; ++j) { + data1[0] = Scalar(std::ldexp(1, NumTraits<Scalar>::min_exponent()-j)); + CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp); + data1[0] = -data1[0]; + CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp); + } + + // zero + data1[0] = 
Scalar(0); + CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp); + + // inf and NaN only compare output fraction, not exponent. + test::packet_helper<PacketTraits::HasExp,Packet> h; + Packet pout; + Scalar sout; + Scalar special[] = { NumTraits<Scalar>::infinity(), + -NumTraits<Scalar>::infinity(), + NumTraits<Scalar>::quiet_NaN()}; + for (int i=0; i<3; ++i) { + data1[0] = special[i]; + ref[0] = Scalar(REF_FREXP(data1[0], ref[PacketSize])); + h.store(data2, internal::pfrexp(h.load(data1), h.forward_reference(pout, sout))); + VERIFY(test::areApprox(ref, data2, 1) && "internal::pfrexp"); + } + } + + for (int i = 0; i < PacketSize; ++i) { + data1[i] = Scalar(internal::random<double>(-1, 1)); + data2[i] = Scalar(internal::random<double>(-1, 1)); + } + for (int i = 0; i < PacketSize; ++i) { + data1[i+PacketSize] = Scalar(internal::random<int>(-4, 4)); + data2[i+PacketSize] = Scalar(internal::random<double>(-4, 4)); } + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + if (PacketTraits::HasExp) { + data1[0] = Scalar(-1); + // underflow to zero + data1[PacketSize] = Scalar(NumTraits<Scalar>::min_exponent()-55); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + // overflow to inf + data1[PacketSize] = Scalar(NumTraits<Scalar>::max_exponent()+10); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + // NaN stays NaN + data1[0] = NumTraits<Scalar>::quiet_NaN(); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + VERIFY((numext::isnan)(data2[0])); + // inf stays inf + data1[0] = NumTraits<Scalar>::infinity(); + data1[PacketSize] = Scalar(NumTraits<Scalar>::min_exponent()-10); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + // zero stays zero + data1[0] = Scalar(0); + data1[PacketSize] = Scalar(NumTraits<Scalar>::max_exponent()+10); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + // Small number big exponent. 
+ data1[0] = Scalar(std::ldexp(Scalar(1.0), NumTraits<Scalar>::min_exponent()-1)); + data1[PacketSize] = Scalar(-NumTraits<Scalar>::min_exponent() + +NumTraits<Scalar>::max_exponent()); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + // Big number small exponent. + data1[0] = Scalar(std::ldexp(Scalar(1.0), NumTraits<Scalar>::max_exponent()-1)); + data1[PacketSize] = Scalar(+NumTraits<Scalar>::min_exponent() + -NumTraits<Scalar>::max_exponent()); + CHECK_CWISE2_IF(PacketTraits::HasExp, REF_LDEXP, internal::pldexp); + } + + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<double>(-1, 1) * std::pow(10., internal::random<double>(-6, 6))); + data2[i] = Scalar(internal::random<double>(-1, 1) * std::pow(10., internal::random<double>(-6, 6))); + } + data1[0] = Scalar(1e-20); CHECK_CWISE1_IF(PacketTraits::HasTanh, std::tanh, internal::ptanh); - if(PacketTraits::HasExp && PacketTraits::size>=2) - { - data1[0] = std::numeric_limits<Scalar>::quiet_NaN(); - data1[1] = std::numeric_limits<Scalar>::epsilon(); - packet_helper<PacketTraits::HasExp,Packet> h; + if (PacketTraits::HasExp && PacketSize >= 2) { + const Scalar small = NumTraits<Scalar>::epsilon(); + data1[0] = NumTraits<Scalar>::quiet_NaN(); + data1[1] = small; + test::packet_helper<PacketTraits::HasExp, Packet> h; h.store(data2, internal::pexp(h.load(data1))); VERIFY((numext::isnan)(data2[0])); - VERIFY_IS_EQUAL(std::exp(std::numeric_limits<Scalar>::epsilon()), data2[1]); + // TODO(rmlarsen): Re-enable for bfloat16. + if (!internal::is_same<Scalar, bfloat16>::value) { + VERIFY_IS_APPROX(std::exp(small), data2[1]); + } - data1[0] = -std::numeric_limits<Scalar>::epsilon(); - data1[1] = 0; + data1[0] = -small; + data1[1] = Scalar(0); h.store(data2, internal::pexp(h.load(data1))); - VERIFY_IS_EQUAL(std::exp(-std::numeric_limits<Scalar>::epsilon()), data2[0]); + // TODO(rmlarsen): Re-enable for bfloat16. 
+ if (!internal::is_same<Scalar, bfloat16>::value) { + VERIFY_IS_APPROX(std::exp(-small), data2[0]); + } VERIFY_IS_EQUAL(std::exp(Scalar(0)), data2[1]); data1[0] = (std::numeric_limits<Scalar>::min)(); data1[1] = -(std::numeric_limits<Scalar>::min)(); h.store(data2, internal::pexp(h.load(data1))); - VERIFY_IS_EQUAL(std::exp((std::numeric_limits<Scalar>::min)()), data2[0]); - VERIFY_IS_EQUAL(std::exp(-(std::numeric_limits<Scalar>::min)()), data2[1]); + VERIFY_IS_APPROX(std::exp((std::numeric_limits<Scalar>::min)()), data2[0]); + VERIFY_IS_APPROX(std::exp(-(std::numeric_limits<Scalar>::min)()), data2[1]); data1[0] = std::numeric_limits<Scalar>::denorm_min(); data1[1] = -std::numeric_limits<Scalar>::denorm_min(); h.store(data2, internal::pexp(h.load(data1))); - VERIFY_IS_EQUAL(std::exp(std::numeric_limits<Scalar>::denorm_min()), data2[0]); - VERIFY_IS_EQUAL(std::exp(-std::numeric_limits<Scalar>::denorm_min()), data2[1]); + VERIFY_IS_APPROX(std::exp(std::numeric_limits<Scalar>::denorm_min()), data2[0]); + VERIFY_IS_APPROX(std::exp(-std::numeric_limits<Scalar>::denorm_min()), data2[1]); } if (PacketTraits::HasTanh) { // NOTE this test migh fail with GCC prior to 6.3, see MathFunctionsImpl.h for details. 
- data1[0] = std::numeric_limits<Scalar>::quiet_NaN(); - packet_helper<internal::packet_traits<Scalar>::HasTanh,Packet> h; + data1[0] = NumTraits<Scalar>::quiet_NaN(); + test::packet_helper<internal::packet_traits<Scalar>::HasTanh, Packet> h; h.store(data2, internal::ptanh(h.load(data1))); VERIFY((numext::isnan)(data2[0])); } -#if EIGEN_HAS_C99_MATH - { - data1[0] = std::numeric_limits<Scalar>::quiet_NaN(); - packet_helper<internal::packet_traits<Scalar>::HasLGamma,Packet> h; - h.store(data2, internal::plgamma(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - } - { - data1[0] = std::numeric_limits<Scalar>::quiet_NaN(); - packet_helper<internal::packet_traits<Scalar>::HasErf,Packet> h; - h.store(data2, internal::perf(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - } - { - data1[0] = std::numeric_limits<Scalar>::quiet_NaN(); - packet_helper<internal::packet_traits<Scalar>::HasErfc,Packet> h; - h.store(data2, internal::perfc(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - } -#endif // EIGEN_HAS_C99_MATH + if (PacketTraits::HasExp) { + internal::scalar_logistic_op<Scalar> logistic; + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<double>(-20, 20)); + } - for (int i=0; i<size; ++i) - { - data1[i] = internal::random<Scalar>(0,1) * std::pow(Scalar(10), internal::random<Scalar>(-6,6)); - data2[i] = internal::random<Scalar>(0,1) * std::pow(Scalar(10), internal::random<Scalar>(-6,6)); + test::packet_helper<PacketTraits::HasExp, Packet> h; + h.store(data2, logistic.packetOp(h.load(data1))); + for (int i = 0; i < PacketSize; ++i) { + VERIFY_IS_APPROX(data2[i], logistic(data1[i])); + } } - if(internal::random<float>(0,1)<0.1f) - data1[internal::random<int>(0, PacketSize)] = 0; - CHECK_CWISE1_IF(PacketTraits::HasSqrt, std::sqrt, internal::psqrt); - CHECK_CWISE1_IF(PacketTraits::HasLog, std::log, internal::plog); -#if EIGEN_HAS_C99_MATH && (__cplusplus > 199711L) +#if EIGEN_HAS_C99_MATH && (EIGEN_COMP_CXXVER >= 11) + data1[0] = 
NumTraits<Scalar>::infinity(); + data1[1] = Scalar(-1); CHECK_CWISE1_IF(PacketTraits::HasLog1p, std::log1p, internal::plog1p); - CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLGamma, std::lgamma, internal::plgamma); - CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasErf, std::erf, internal::perf); - CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasErfc, std::erfc, internal::perfc); + data1[0] = NumTraits<Scalar>::infinity(); + data1[1] = -NumTraits<Scalar>::infinity(); + CHECK_CWISE1_IF(PacketTraits::HasExpm1, std::expm1, internal::pexpm1); #endif - if(PacketTraits::HasLog && PacketTraits::size>=2) - { - data1[0] = std::numeric_limits<Scalar>::quiet_NaN(); - data1[1] = std::numeric_limits<Scalar>::epsilon(); - packet_helper<PacketTraits::HasLog,Packet> h; - h.store(data2, internal::plog(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::epsilon()), data2[1]); + if (PacketSize >= 2) { + data1[0] = NumTraits<Scalar>::quiet_NaN(); + data1[1] = NumTraits<Scalar>::epsilon(); + if (PacketTraits::HasLog) { + test::packet_helper<PacketTraits::HasLog, Packet> h; + h.store(data2, internal::plog(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + // TODO(cantonios): Re-enable for bfloat16. + if (!internal::is_same<Scalar, bfloat16>::value) { + VERIFY_IS_APPROX(std::log(data1[1]), data2[1]); + } + + data1[0] = -NumTraits<Scalar>::epsilon(); + data1[1] = Scalar(0); + h.store(data2, internal::plog(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + VERIFY_IS_EQUAL(std::log(Scalar(0)), data2[1]); + + data1[0] = (std::numeric_limits<Scalar>::min)(); + data1[1] = -(std::numeric_limits<Scalar>::min)(); + h.store(data2, internal::plog(h.load(data1))); + // TODO(cantonios): Re-enable for bfloat16. 
+ if (!internal::is_same<Scalar, bfloat16>::value) { + VERIFY_IS_APPROX(std::log((std::numeric_limits<Scalar>::min)()), data2[0]); + } + VERIFY((numext::isnan)(data2[1])); + + // Note: 32-bit arm always flushes denorms to zero. +#if !EIGEN_ARCH_ARM + if (std::numeric_limits<Scalar>::has_denorm == std::denorm_present) { + data1[0] = std::numeric_limits<Scalar>::denorm_min(); + data1[1] = -std::numeric_limits<Scalar>::denorm_min(); + h.store(data2, internal::plog(h.load(data1))); + // TODO(rmlarsen): Reenable. + // VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::denorm_min()), data2[0]); + VERIFY((numext::isnan)(data2[1])); + } +#endif - data1[0] = -std::numeric_limits<Scalar>::epsilon(); - data1[1] = 0; - h.store(data2, internal::plog(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - VERIFY_IS_EQUAL(std::log(Scalar(0)), data2[1]); + data1[0] = Scalar(-1.0f); + h.store(data2, internal::plog(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); - data1[0] = (std::numeric_limits<Scalar>::min)(); - data1[1] = -(std::numeric_limits<Scalar>::min)(); - h.store(data2, internal::plog(h.load(data1))); - VERIFY_IS_EQUAL(std::log((std::numeric_limits<Scalar>::min)()), data2[0]); - VERIFY((numext::isnan)(data2[1])); + data1[0] = NumTraits<Scalar>::infinity(); + h.store(data2, internal::plog(h.load(data1))); + VERIFY((numext::isinf)(data2[0])); + } + if (PacketTraits::HasLog1p) { + test::packet_helper<PacketTraits::HasLog1p, Packet> h; + data1[0] = Scalar(-2); + data1[1] = -NumTraits<Scalar>::infinity(); + h.store(data2, internal::plog1p(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + VERIFY((numext::isnan)(data2[1])); + } + if (PacketTraits::HasSqrt) { + test::packet_helper<PacketTraits::HasSqrt, Packet> h; + data1[0] = Scalar(-1.0f); + if (std::numeric_limits<Scalar>::has_denorm == std::denorm_present) { + data1[1] = -std::numeric_limits<Scalar>::denorm_min(); + } else { + data1[1] = -NumTraits<Scalar>::epsilon(); + } + h.store(data2, 
internal::psqrt(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + VERIFY((numext::isnan)(data2[1])); + } + // TODO(rmlarsen): Re-enable for half and bfloat16. + if (PacketTraits::HasCos + && !internal::is_same<Scalar, half>::value + && !internal::is_same<Scalar, bfloat16>::value) { + test::packet_helper<PacketTraits::HasCos, Packet> h; + for (Scalar k = Scalar(1); k < Scalar(10000) / NumTraits<Scalar>::epsilon(); k *= Scalar(2)) { + for (int k1 = 0; k1 <= 1; ++k1) { + data1[0] = Scalar((2 * double(k) + k1) * double(EIGEN_PI) / 2 * internal::random<double>(0.8, 1.2)); + data1[1] = Scalar((2 * double(k) + 2 + k1) * double(EIGEN_PI) / 2 * internal::random<double>(0.8, 1.2)); + h.store(data2, internal::pcos(h.load(data1))); + h.store(data2 + PacketSize, internal::psin(h.load(data1))); + VERIFY(data2[0] <= Scalar(1.) && data2[0] >= Scalar(-1.)); + VERIFY(data2[1] <= Scalar(1.) && data2[1] >= Scalar(-1.)); + VERIFY(data2[PacketSize + 0] <= Scalar(1.) && data2[PacketSize + 0] >= Scalar(-1.)); + VERIFY(data2[PacketSize + 1] <= Scalar(1.) 
&& data2[PacketSize + 1] >= Scalar(-1.)); + + VERIFY_IS_APPROX(data2[0], std::cos(data1[0])); + VERIFY_IS_APPROX(data2[1], std::cos(data1[1])); + VERIFY_IS_APPROX(data2[PacketSize + 0], std::sin(data1[0])); + VERIFY_IS_APPROX(data2[PacketSize + 1], std::sin(data1[1])); + + VERIFY_IS_APPROX(numext::abs2(data2[0]) + numext::abs2(data2[PacketSize + 0]), Scalar(1)); + VERIFY_IS_APPROX(numext::abs2(data2[1]) + numext::abs2(data2[PacketSize + 1]), Scalar(1)); + } + } + + data1[0] = NumTraits<Scalar>::infinity(); + data1[1] = -NumTraits<Scalar>::infinity(); + h.store(data2, internal::psin(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + VERIFY((numext::isnan)(data2[1])); + + h.store(data2, internal::pcos(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + VERIFY((numext::isnan)(data2[1])); + + data1[0] = NumTraits<Scalar>::quiet_NaN(); + h.store(data2, internal::psin(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + h.store(data2, internal::pcos(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + + data1[0] = -Scalar(0.); + h.store(data2, internal::psin(h.load(data1))); + VERIFY(internal::biteq(data2[0], data1[0])); + h.store(data2, internal::pcos(h.load(data1))); + VERIFY_IS_EQUAL(data2[0], Scalar(1)); + } + } +} - data1[0] = std::numeric_limits<Scalar>::denorm_min(); - data1[1] = -std::numeric_limits<Scalar>::denorm_min(); - h.store(data2, internal::plog(h.load(data1))); - // VERIFY_IS_EQUAL(std::log(std::numeric_limits<Scalar>::denorm_min()), data2[0]); - VERIFY((numext::isnan)(data2[1])); +#define CAST_CHECK_CWISE1_IF(COND, REFOP, POP, SCALAR, REFTYPE) if(COND) { \ + test::packet_helper<COND,Packet> h; \ + for (int i=0; i<PacketSize; ++i) \ + ref[i] = SCALAR(REFOP(static_cast<REFTYPE>(data1[i]))); \ + h.store(data2, POP(h.load(data1))); \ + VERIFY(test::areApprox(ref, data2, PacketSize) && #POP); \ +} - data1[0] = Scalar(-1.0f); - h.store(data2, internal::plog(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - h.store(data2, 
internal::psqrt(h.load(data1))); - VERIFY((numext::isnan)(data2[0])); - VERIFY((numext::isnan)(data2[1])); - } +template <typename Scalar> +Scalar propagate_nan_max(const Scalar& a, const Scalar& b) { + if ((numext::isnan)(a)) return a; + if ((numext::isnan)(b)) return b; + return (numext::maxi)(a,b); } -template<typename Scalar> void packetmath_notcomplex() -{ - using std::abs; - typedef internal::packet_traits<Scalar> PacketTraits; - typedef typename PacketTraits::type Packet; - const int PacketSize = PacketTraits::size; +template <typename Scalar> +Scalar propagate_nan_min(const Scalar& a, const Scalar& b) { + if ((numext::isnan)(a)) return a; + if ((numext::isnan)(b)) return b; + return (numext::mini)(a,b); +} - EIGEN_ALIGN_MAX Scalar data1[PacketTraits::size*4]; - EIGEN_ALIGN_MAX Scalar data2[PacketTraits::size*4]; - EIGEN_ALIGN_MAX Scalar ref[PacketTraits::size*4]; +template <typename Scalar> +Scalar propagate_number_max(const Scalar& a, const Scalar& b) { + if ((numext::isnan)(a)) return b; + if ((numext::isnan)(b)) return a; + return (numext::maxi)(a,b); +} - Array<Scalar,Dynamic,1>::Map(data1, PacketTraits::size*4).setRandom(); +template <typename Scalar> +Scalar propagate_number_min(const Scalar& a, const Scalar& b) { + if ((numext::isnan)(a)) return b; + if ((numext::isnan)(b)) return a; + return (numext::mini)(a,b); +} - ref[0] = data1[0]; - for (int i=0; i<PacketSize; ++i) - ref[0] = (std::min)(ref[0],data1[i]); - VERIFY(internal::isApprox(ref[0], internal::predux_min(internal::pload<Packet>(data1))) && "internal::predux_min"); +template <typename Scalar, typename Packet> +void packetmath_notcomplex() { + typedef internal::packet_traits<Scalar> PacketTraits; + const int PacketSize = internal::unpacket_traits<Packet>::size; + + EIGEN_ALIGN_MAX Scalar data1[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar data2[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar ref[PacketSize * 4]; + + Array<Scalar, Dynamic, 1>::Map(data1, PacketSize * 4).setRandom(); 
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasMin); VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasMax); CHECK_CWISE2_IF(PacketTraits::HasMin, (std::min), internal::pmin); CHECK_CWISE2_IF(PacketTraits::HasMax, (std::max), internal::pmax); - CHECK_CWISE1(abs, internal::pabs); + + CHECK_CWISE2_IF(PacketTraits::HasMin, propagate_number_min, internal::pmin<PropagateNumbers>); + CHECK_CWISE2_IF(PacketTraits::HasMax, propagate_number_max, internal::pmax<PropagateNumbers>); + CHECK_CWISE1(numext::abs, internal::pabs); + CHECK_CWISE2_IF(PacketTraits::HasAbsDiff, REF_ABS_DIFF, internal::pabsdiff); ref[0] = data1[0]; - for (int i=0; i<PacketSize; ++i) - ref[0] = (std::max)(ref[0],data1[i]); + for (int i = 0; i < PacketSize; ++i) ref[0] = internal::pmin(ref[0], data1[i]); + VERIFY(internal::isApprox(ref[0], internal::predux_min(internal::pload<Packet>(data1))) && "internal::predux_min"); + ref[0] = data1[0]; + for (int i = 0; i < PacketSize; ++i) ref[0] = internal::pmax(ref[0], data1[i]); VERIFY(internal::isApprox(ref[0], internal::predux_max(internal::pload<Packet>(data1))) && "internal::predux_max"); - for (int i=0; i<PacketSize; ++i) - ref[i] = data1[0]+Scalar(i); + for (int i = 0; i < PacketSize; ++i) ref[i] = data1[0] + Scalar(i); internal::pstore(data2, internal::plset<Packet>(data1[0])); - VERIFY(areApprox(ref, data2, PacketSize) && "internal::plset"); + VERIFY(test::areApprox(ref, data2, PacketSize) && "internal::plset"); + + { + unsigned char* data1_bits = reinterpret_cast<unsigned char*>(data1); + // predux_all - not needed yet + // for (unsigned int i=0; i<PacketSize*sizeof(Scalar); ++i) data1_bits[i] = 0xff; + // VERIFY(internal::predux_all(internal::pload<Packet>(data1)) && "internal::predux_all(1111)"); + // for(int k=0; k<PacketSize; ++k) + // { + // for (unsigned int i=0; i<sizeof(Scalar); ++i) data1_bits[k*sizeof(Scalar)+i] = 0x0; + // VERIFY( (!internal::predux_all(internal::pload<Packet>(data1))) && "internal::predux_all(0101)"); + // 
for (unsigned int i=0; i<sizeof(Scalar); ++i) data1_bits[k*sizeof(Scalar)+i] = 0xff; + // } + + // predux_any + for (unsigned int i = 0; i < PacketSize * sizeof(Scalar); ++i) data1_bits[i] = 0x0; + VERIFY((!internal::predux_any(internal::pload<Packet>(data1))) && "internal::predux_any(0000)"); + for (int k = 0; k < PacketSize; ++k) { + for (unsigned int i = 0; i < sizeof(Scalar); ++i) data1_bits[k * sizeof(Scalar) + i] = 0xff; + VERIFY(internal::predux_any(internal::pload<Packet>(data1)) && "internal::predux_any(0101)"); + for (unsigned int i = 0; i < sizeof(Scalar); ++i) data1_bits[k * sizeof(Scalar) + i] = 0x00; + } + } + + + // Test NaN propagation. + if (!NumTraits<Scalar>::IsInteger) { + // Test reductions with no NaNs. + ref[0] = data1[0]; + for (int i = 0; i < PacketSize; ++i) ref[0] = internal::pmin<PropagateNumbers>(ref[0], data1[i]); + VERIFY(internal::isApprox(ref[0], internal::predux_min<PropagateNumbers>(internal::pload<Packet>(data1))) && "internal::predux_min<PropagateNumbers>"); + ref[0] = data1[0]; + for (int i = 0; i < PacketSize; ++i) ref[0] = internal::pmin<PropagateNaN>(ref[0], data1[i]); + VERIFY(internal::isApprox(ref[0], internal::predux_min<PropagateNaN>(internal::pload<Packet>(data1))) && "internal::predux_min<PropagateNaN>"); + ref[0] = data1[0]; + for (int i = 0; i < PacketSize; ++i) ref[0] = internal::pmax<PropagateNumbers>(ref[0], data1[i]); + VERIFY(internal::isApprox(ref[0], internal::predux_max<PropagateNumbers>(internal::pload<Packet>(data1))) && "internal::predux_max<PropagateNumbers>"); + ref[0] = data1[0]; + for (int i = 0; i < PacketSize; ++i) ref[0] = internal::pmax<PropagateNaN>(ref[0], data1[i]); + VERIFY(internal::isApprox(ref[0], internal::predux_max<PropagateNaN>(internal::pload<Packet>(data1))) && "internal::predux_max<PropagateNumbers>"); + // A single NaN. 
+ const size_t index = std::numeric_limits<size_t>::quiet_NaN() % PacketSize; + data1[index] = NumTraits<Scalar>::quiet_NaN(); + VERIFY(PacketSize==1 || !(numext::isnan)(internal::predux_min<PropagateNumbers>(internal::pload<Packet>(data1)))); + VERIFY((numext::isnan)(internal::predux_min<PropagateNaN>(internal::pload<Packet>(data1)))); + VERIFY(PacketSize==1 || !(numext::isnan)(internal::predux_max<PropagateNumbers>(internal::pload<Packet>(data1)))); + VERIFY((numext::isnan)(internal::predux_max<PropagateNaN>(internal::pload<Packet>(data1)))); + // All NaNs. + for (int i = 0; i < 4 * PacketSize; ++i) data1[i] = NumTraits<Scalar>::quiet_NaN(); + VERIFY((numext::isnan)(internal::predux_min<PropagateNumbers>(internal::pload<Packet>(data1)))); + VERIFY((numext::isnan)(internal::predux_min<PropagateNaN>(internal::pload<Packet>(data1)))); + VERIFY((numext::isnan)(internal::predux_max<PropagateNumbers>(internal::pload<Packet>(data1)))); + VERIFY((numext::isnan)(internal::predux_max<PropagateNaN>(internal::pload<Packet>(data1)))); + + // Test NaN propagation for coefficient-wise min and max. + for (int i = 0; i < PacketSize; ++i) { + data1[i] = internal::random<bool>() ? NumTraits<Scalar>::quiet_NaN() : Scalar(0); + data1[i + PacketSize] = internal::random<bool>() ? NumTraits<Scalar>::quiet_NaN() : Scalar(0); + } + // Note: NaN propagation is implementation defined for pmin/pmax, so we do not test it here. 
+ CHECK_CWISE2_IF(PacketTraits::HasMin, propagate_number_min, (internal::pmin<PropagateNumbers>)); + CHECK_CWISE2_IF(PacketTraits::HasMax, propagate_number_max, internal::pmax<PropagateNumbers>); + CHECK_CWISE2_IF(PacketTraits::HasMin, propagate_nan_min, (internal::pmin<PropagateNaN>)); + CHECK_CWISE2_IF(PacketTraits::HasMax, propagate_nan_max, internal::pmax<PropagateNaN>); + } + + packetmath_boolean_mask_ops_notcomplex<Scalar, Packet>(); } -template<typename Scalar,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar* data1, Scalar* data2, Scalar* ref, Scalar* pval) -{ - typedef internal::packet_traits<Scalar> PacketTraits; - typedef typename PacketTraits::type Packet; - const int PacketSize = PacketTraits::size; +template <typename Scalar, typename Packet, bool ConjLhs, bool ConjRhs> +void test_conj_helper(Scalar* data1, Scalar* data2, Scalar* ref, Scalar* pval) { + const int PacketSize = internal::unpacket_traits<Packet>::size; internal::conj_if<ConjLhs> cj0; internal::conj_if<ConjRhs> cj1; - internal::conj_helper<Scalar,Scalar,ConjLhs,ConjRhs> cj; - internal::conj_helper<Packet,Packet,ConjLhs,ConjRhs> pcj; + internal::conj_helper<Scalar, Scalar, ConjLhs, ConjRhs> cj; + internal::conj_helper<Packet, Packet, ConjLhs, ConjRhs> pcj; - for(int i=0;i<PacketSize;++i) - { + for (int i = 0; i < PacketSize; ++i) { ref[i] = cj0(data1[i]) * cj1(data2[i]); - VERIFY(internal::isApprox(ref[i], cj.pmul(data1[i],data2[i])) && "conj_helper pmul"); + VERIFY(internal::isApprox(ref[i], cj.pmul(data1[i], data2[i])) && "conj_helper pmul"); } - internal::pstore(pval,pcj.pmul(internal::pload<Packet>(data1),internal::pload<Packet>(data2))); - VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmul"); + internal::pstore(pval, pcj.pmul(internal::pload<Packet>(data1), internal::pload<Packet>(data2))); + VERIFY(test::areApprox(ref, pval, PacketSize) && "conj_helper pmul"); - for(int i=0;i<PacketSize;++i) - { + for (int i = 0; i < PacketSize; ++i) { Scalar tmp = ref[i]; ref[i] += 
cj0(data1[i]) * cj1(data2[i]); - VERIFY(internal::isApprox(ref[i], cj.pmadd(data1[i],data2[i],tmp)) && "conj_helper pmadd"); + VERIFY(internal::isApprox(ref[i], cj.pmadd(data1[i], data2[i], tmp)) && "conj_helper pmadd"); } - internal::pstore(pval,pcj.pmadd(internal::pload<Packet>(data1),internal::pload<Packet>(data2),internal::pload<Packet>(pval))); - VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmadd"); + internal::pstore( + pval, pcj.pmadd(internal::pload<Packet>(data1), internal::pload<Packet>(data2), internal::pload<Packet>(pval))); + VERIFY(test::areApprox(ref, pval, PacketSize) && "conj_helper pmadd"); } -template<typename Scalar> void packetmath_complex() -{ +template <typename Scalar, typename Packet> +void packetmath_complex() { typedef internal::packet_traits<Scalar> PacketTraits; - typedef typename PacketTraits::type Packet; - const int PacketSize = PacketTraits::size; + typedef typename Scalar::value_type RealScalar; + const int PacketSize = internal::unpacket_traits<Packet>::size; - const int size = PacketSize*4; - EIGEN_ALIGN_MAX Scalar data1[PacketSize*4]; - EIGEN_ALIGN_MAX Scalar data2[PacketSize*4]; - EIGEN_ALIGN_MAX Scalar ref[PacketSize*4]; - EIGEN_ALIGN_MAX Scalar pval[PacketSize*4]; + const int size = PacketSize * 4; + EIGEN_ALIGN_MAX Scalar data1[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar data2[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar ref[PacketSize * 4]; + EIGEN_ALIGN_MAX Scalar pval[PacketSize * 4]; - for (int i=0; i<size; ++i) - { + for (int i = 0; i < size; ++i) { data1[i] = internal::random<Scalar>() * Scalar(1e2); data2[i] = internal::random<Scalar>() * Scalar(1e2); } - test_conj_helper<Scalar,false,false> (data1,data2,ref,pval); - test_conj_helper<Scalar,false,true> (data1,data2,ref,pval); - test_conj_helper<Scalar,true,false> (data1,data2,ref,pval); - test_conj_helper<Scalar,true,true> (data1,data2,ref,pval); + test_conj_helper<Scalar, Packet, false, false>(data1, data2, ref, pval); + test_conj_helper<Scalar, Packet, false, 
true>(data1, data2, ref, pval); + test_conj_helper<Scalar, Packet, true, false>(data1, data2, ref, pval); + test_conj_helper<Scalar, Packet, true, true>(data1, data2, ref, pval); + // Test pcplxflip. { - for(int i=0;i<PacketSize;++i) - ref[i] = Scalar(std::imag(data1[i]),std::real(data1[i])); - internal::pstore(pval,internal::pcplxflip(internal::pload<Packet>(data1))); - VERIFY(areApprox(ref, pval, PacketSize) && "pcplxflip"); + for (int i = 0; i < PacketSize; ++i) ref[i] = Scalar(std::imag(data1[i]), std::real(data1[i])); + internal::pstore(pval, internal::pcplxflip(internal::pload<Packet>(data1))); + VERIFY(test::areApprox(ref, pval, PacketSize) && "pcplxflip"); + } + + if (PacketTraits::HasSqrt) { + for (int i = 0; i < size; ++i) { + data1[i] = Scalar(internal::random<RealScalar>(), internal::random<RealScalar>()); + } + CHECK_CWISE1_N(numext::sqrt, internal::psqrt, size); + + // Test misc. corner cases. + const RealScalar zero = RealScalar(0); + const RealScalar one = RealScalar(1); + const RealScalar inf = std::numeric_limits<RealScalar>::infinity(); + const RealScalar nan = std::numeric_limits<RealScalar>::quiet_NaN(); + data1[0] = Scalar(zero, zero); + data1[1] = Scalar(-zero, zero); + data1[2] = Scalar(one, zero); + data1[3] = Scalar(zero, one); + CHECK_CWISE1_N(numext::sqrt, internal::psqrt, 4); + data1[0] = Scalar(-one, zero); + data1[1] = Scalar(zero, -one); + data1[2] = Scalar(one, one); + data1[3] = Scalar(-one, -one); + CHECK_CWISE1_N(numext::sqrt, internal::psqrt, 4); + data1[0] = Scalar(inf, zero); + data1[1] = Scalar(zero, inf); + data1[2] = Scalar(-inf, zero); + data1[3] = Scalar(zero, -inf); + CHECK_CWISE1_N(numext::sqrt, internal::psqrt, 4); + data1[0] = Scalar(inf, inf); + data1[1] = Scalar(-inf, inf); + data1[2] = Scalar(inf, -inf); + data1[3] = Scalar(-inf, -inf); + CHECK_CWISE1_N(numext::sqrt, internal::psqrt, 4); + data1[0] = Scalar(nan, zero); + data1[1] = Scalar(zero, nan); + data1[2] = Scalar(nan, one); + data1[3] = Scalar(one, nan); + 
CHECK_CWISE1_N(numext::sqrt, internal::psqrt, 4); + data1[0] = Scalar(nan, nan); + data1[1] = Scalar(inf, nan); + data1[2] = Scalar(nan, inf); + data1[3] = Scalar(-inf, nan); + CHECK_CWISE1_N(numext::sqrt, internal::psqrt, 4); } } -template<typename Scalar> void packetmath_scatter_gather() -{ - typedef internal::packet_traits<Scalar> PacketTraits; - typedef typename PacketTraits::type Packet; +template <typename Scalar, typename Packet> +void packetmath_scatter_gather() { typedef typename NumTraits<Scalar>::Real RealScalar; - const int PacketSize = PacketTraits::size; + const int PacketSize = internal::unpacket_traits<Packet>::size; EIGEN_ALIGN_MAX Scalar data1[PacketSize]; - RealScalar refvalue = 0; - for (int i=0; i<PacketSize; ++i) { - data1[i] = internal::random<Scalar>()/RealScalar(PacketSize); + RealScalar refvalue = RealScalar(0); + for (int i = 0; i < PacketSize; ++i) { + data1[i] = internal::random<Scalar>() / RealScalar(PacketSize); } - int stride = internal::random<int>(1,20); + int stride = internal::random<int>(1, 20); + + // Buffer of zeros. 
+ EIGEN_ALIGN_MAX Scalar buffer[PacketSize * 20] = {}; - EIGEN_ALIGN_MAX Scalar buffer[PacketSize*20]; - memset(buffer, 0, 20*PacketSize*sizeof(Scalar)); Packet packet = internal::pload<Packet>(data1); internal::pscatter<Scalar, Packet>(buffer, packet, stride); - for (int i = 0; i < PacketSize*20; ++i) { - if ((i%stride) == 0 && i<stride*PacketSize) { - VERIFY(isApproxAbs(buffer[i], data1[i/stride], refvalue) && "pscatter"); + for (int i = 0; i < PacketSize * 20; ++i) { + if ((i % stride) == 0 && i < stride * PacketSize) { + VERIFY(test::isApproxAbs(buffer[i], data1[i / stride], refvalue) && "pscatter"); } else { - VERIFY(isApproxAbs(buffer[i], Scalar(0), refvalue) && "pscatter"); + VERIFY(test::isApproxAbs(buffer[i], Scalar(0), refvalue) && "pscatter"); } } - for (int i=0; i<PacketSize*7; ++i) { - buffer[i] = internal::random<Scalar>()/RealScalar(PacketSize); + for (int i = 0; i < PacketSize * 7; ++i) { + buffer[i] = internal::random<Scalar>() / RealScalar(PacketSize); } packet = internal::pgather<Scalar, Packet>(buffer, 7); internal::pstore(data1, packet); for (int i = 0; i < PacketSize; ++i) { - VERIFY(isApproxAbs(data1[i], buffer[i*7], refvalue) && "pgather"); + VERIFY(test::isApproxAbs(data1[i], buffer[i * 7], refvalue) && "pgather"); } } -void test_packetmath() -{ - for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( packetmath<float>() ); - CALL_SUBTEST_2( packetmath<double>() ); - CALL_SUBTEST_3( packetmath<int>() ); - CALL_SUBTEST_4( packetmath<std::complex<float> >() ); - CALL_SUBTEST_5( packetmath<std::complex<double> >() ); - - CALL_SUBTEST_1( packetmath_notcomplex<float>() ); - CALL_SUBTEST_2( packetmath_notcomplex<double>() ); - CALL_SUBTEST_3( packetmath_notcomplex<int>() ); - - CALL_SUBTEST_1( packetmath_real<float>() ); - CALL_SUBTEST_2( packetmath_real<double>() ); - - CALL_SUBTEST_4( packetmath_complex<std::complex<float> >() ); - CALL_SUBTEST_5( packetmath_complex<std::complex<double> >() ); - - CALL_SUBTEST_1( 
packetmath_scatter_gather<float>() ); - CALL_SUBTEST_2( packetmath_scatter_gather<double>() ); - CALL_SUBTEST_3( packetmath_scatter_gather<int>() ); - CALL_SUBTEST_4( packetmath_scatter_gather<std::complex<float> >() ); - CALL_SUBTEST_5( packetmath_scatter_gather<std::complex<double> >() ); +namespace Eigen { +namespace test { + +template <typename Scalar, typename PacketType> +struct runall<Scalar, PacketType, false, false> { // i.e. float or double + static void run() { + packetmath<Scalar, PacketType>(); + packetmath_scatter_gather<Scalar, PacketType>(); + packetmath_notcomplex<Scalar, PacketType>(); + packetmath_real<Scalar, PacketType>(); + } +}; + +template <typename Scalar, typename PacketType> +struct runall<Scalar, PacketType, false, true> { // i.e. int + static void run() { + packetmath<Scalar, PacketType>(); + packetmath_scatter_gather<Scalar, PacketType>(); + packetmath_notcomplex<Scalar, PacketType>(); + } +}; + +template <typename Scalar, typename PacketType> +struct runall<Scalar, PacketType, true, false> { // i.e. 
complex + static void run() { + packetmath<Scalar, PacketType>(); + packetmath_scatter_gather<Scalar, PacketType>(); + packetmath_complex<Scalar, PacketType>(); + } +}; + +} // namespace test +} // namespace Eigen + +EIGEN_DECLARE_TEST(packetmath) { + g_first_pass = true; + for (int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(test::runner<float>::run()); + CALL_SUBTEST_2(test::runner<double>::run()); + CALL_SUBTEST_3(test::runner<int8_t>::run()); + CALL_SUBTEST_4(test::runner<uint8_t>::run()); + CALL_SUBTEST_5(test::runner<int16_t>::run()); + CALL_SUBTEST_6(test::runner<uint16_t>::run()); + CALL_SUBTEST_7(test::runner<int32_t>::run()); + CALL_SUBTEST_8(test::runner<uint32_t>::run()); + CALL_SUBTEST_9(test::runner<int64_t>::run()); + CALL_SUBTEST_10(test::runner<uint64_t>::run()); + CALL_SUBTEST_11(test::runner<std::complex<float> >::run()); + CALL_SUBTEST_12(test::runner<std::complex<double> >::run()); + CALL_SUBTEST_13(test::runner<half>::run()); + CALL_SUBTEST_14((packetmath<bool, internal::packet_traits<bool>::type>())); + CALL_SUBTEST_15(test::runner<bfloat16>::run()); + g_first_pass = false; } } diff --git a/test/packetmath_test_shared.h b/test/packetmath_test_shared.h new file mode 100644 index 000000000..8624fe2fe --- /dev/null +++ b/test/packetmath_test_shared.h @@ -0,0 +1,275 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> +// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" +#include <typeinfo> + +#if defined __GNUC__ && __GNUC__>=6 + #pragma GCC diagnostic ignored "-Wignored-attributes" +#endif +// using namespace Eigen; + +bool g_first_pass = true; + +namespace Eigen { +namespace internal { + +template<typename T> T negate(const T& x) { return -x; } + +template<typename T> +Map<const Array<unsigned char,sizeof(T),1> > +bits(const T& x) { + return Map<const Array<unsigned char,sizeof(T),1> >(reinterpret_cast<const unsigned char *>(&x)); +} + +// The following implement bitwise operations on floating point types +template<typename T,typename Bits,typename Func> +T apply_bit_op(Bits a, Bits b, Func f) { + Array<unsigned char,sizeof(T),1> data; + T res; + for(Index i = 0; i < data.size(); ++i) + data[i] = f(a[i], b[i]); + // Note: The reinterpret_cast works around GCC's class-memaccess warnings: + std::memcpy(reinterpret_cast<unsigned char*>(&res), data.data(), sizeof(T)); + return res; +} + +#define EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,T) \ + template<> T EIGEN_CAT(p,OP)(const T& a,const T& b) { \ + return apply_bit_op<T>(bits(a),bits(b),FUNC); \ + } + +#define EIGEN_TEST_MAKE_BITWISE(OP,FUNC) \ + EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,float) \ + EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,double) \ + EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,half) \ + EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,bfloat16) \ + EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,std::complex<float>) \ + EIGEN_TEST_MAKE_BITWISE2(OP,FUNC,std::complex<double>) + +EIGEN_TEST_MAKE_BITWISE(xor,std::bit_xor<unsigned char>()) +EIGEN_TEST_MAKE_BITWISE(and,std::bit_and<unsigned char>()) +EIGEN_TEST_MAKE_BITWISE(or, std::bit_or<unsigned char>()) +struct bit_andnot{ + template<typename T> T + operator()(T a, T b) const { return a & (~b); } +}; +EIGEN_TEST_MAKE_BITWISE(andnot, bit_andnot()) +template<typename T> +bool biteq(T a, T b) { + return (bits(a) == bits(b)).all(); +} + +} + +namespace test { + +// NOTE: we disable inlining for this function to workaround a GCC issue when using -O3 and the i387 
FPU. +template<typename Scalar> EIGEN_DONT_INLINE +bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue) +{ + return internal::isMuchSmallerThan(a-b, refvalue); +} + +template<typename Scalar> +inline void print_mismatch(const Scalar* ref, const Scalar* vec, int size) { + std::cout << "ref: [" << Map<const Matrix<Scalar,1,Dynamic> >(ref,size) << "]" << " != vec: [" << Map<const Matrix<Scalar,1,Dynamic> >(vec,size) << "]\n"; +} + +template<typename Scalar> bool areApproxAbs(const Scalar* a, const Scalar* b, int size, const typename NumTraits<Scalar>::Real& refvalue) +{ + for (int i=0; i<size; ++i) + { + if (!isApproxAbs(a[i],b[i],refvalue)) + { + print_mismatch(a, b, size); + return false; + } + } + return true; +} + +template<typename Scalar> bool areApprox(const Scalar* a, const Scalar* b, int size) +{ + for (int i=0; i<size; ++i) + { + if ( a[i]!=b[i] && !internal::isApprox(a[i],b[i]) + && !((numext::isnan)(a[i]) && (numext::isnan)(b[i])) ) + { + print_mismatch(a, b, size); + return false; + } + } + return true; +} + +template<typename Scalar> bool areEqual(const Scalar* a, const Scalar* b, int size) +{ + for (int i=0; i<size; ++i) + { + if ( (a[i] != b[i]) && !((numext::isnan)(a[i]) && (numext::isnan)(b[i])) ) + { + print_mismatch(a, b, size); + return false; + } + } + return true; +} + +#define CHECK_CWISE1(REFOP, POP) { \ + for (int i=0; i<PacketSize; ++i) \ + ref[i] = REFOP(data1[i]); \ + internal::pstore(data2, POP(internal::pload<Packet>(data1))); \ + VERIFY(test::areApprox(ref, data2, PacketSize) && #POP); \ +} + +// Checks component-wise for input of size N. All of data1, data2, and ref +// should have size at least ceil(N/PacketSize)*PacketSize to avoid memory +// access errors. 
+#define CHECK_CWISE1_N(REFOP, POP, N) { \ + for (int i=0; i<N; ++i) \ + ref[i] = REFOP(data1[i]); \ + for (int j=0; j<N; j+=PacketSize) \ + internal::pstore(data2 + j, POP(internal::pload<Packet>(data1 + j))); \ + VERIFY(test::areApprox(ref, data2, N) && #POP); \ +} + +template<bool Cond,typename Packet> +struct packet_helper +{ + template<typename T> + inline Packet load(const T* from) const { return internal::pload<Packet>(from); } + + template<typename T> + inline Packet loadu(const T* from) const { return internal::ploadu<Packet>(from); } + + template<typename T> + inline Packet load(const T* from, unsigned long long umask) const { return internal::ploadu<Packet>(from, umask); } + + template<typename T> + inline void store(T* to, const Packet& x) const { internal::pstore(to,x); } + + template<typename T> + inline void store(T* to, const Packet& x, unsigned long long umask) const { internal::pstoreu(to, x, umask); } + + template<typename T> + inline Packet& forward_reference(Packet& packet, T& /*scalar*/) const { return packet; } +}; + +template<typename Packet> +struct packet_helper<false,Packet> +{ + template<typename T> + inline T load(const T* from) const { return *from; } + + template<typename T> + inline T loadu(const T* from) const { return *from; } + + template<typename T> + inline T load(const T* from, unsigned long long) const { return *from; } + + template<typename T> + inline void store(T* to, const T& x) const { *to = x; } + + template<typename T> + inline void store(T* to, const T& x, unsigned long long) const { *to = x; } + + template<typename T> + inline T& forward_reference(Packet& /*packet*/, T& scalar) const { return scalar; } +}; + +#define CHECK_CWISE1_IF(COND, REFOP, POP) if(COND) { \ + test::packet_helper<COND,Packet> h; \ + for (int i=0; i<PacketSize; ++i) \ + ref[i] = Scalar(REFOP(data1[i])); \ + h.store(data2, POP(h.load(data1))); \ + VERIFY(test::areApprox(ref, data2, PacketSize) && #POP); \ +} + +#define CHECK_CWISE1_EXACT_IF(COND, 
REFOP, POP) if(COND) { \ + test::packet_helper<COND,Packet> h; \ + for (int i=0; i<PacketSize; ++i) \ + ref[i] = Scalar(REFOP(data1[i])); \ + h.store(data2, POP(h.load(data1))); \ + VERIFY(test::areEqual(ref, data2, PacketSize) && #POP); \ +} + +#define CHECK_CWISE2_IF(COND, REFOP, POP) if(COND) { \ + test::packet_helper<COND,Packet> h; \ + for (int i=0; i<PacketSize; ++i) \ + ref[i] = Scalar(REFOP(data1[i], data1[i+PacketSize])); \ + h.store(data2, POP(h.load(data1),h.load(data1+PacketSize))); \ + VERIFY(test::areApprox(ref, data2, PacketSize) && #POP); \ +} + +// One input, one output by reference. +#define CHECK_CWISE1_BYREF1_IF(COND, REFOP, POP) if(COND) { \ + test::packet_helper<COND,Packet> h; \ + for (int i=0; i<PacketSize; ++i) \ + ref[i] = Scalar(REFOP(data1[i], ref[i+PacketSize])); \ + Packet pout; \ + Scalar sout; \ + h.store(data2, POP(h.load(data1), h.forward_reference(pout, sout))); \ + h.store(data2+PacketSize, h.forward_reference(pout, sout)); \ + VERIFY(test::areApprox(ref, data2, 2 * PacketSize) && #POP); \ +} + +#define CHECK_CWISE3_IF(COND, REFOP, POP) if (COND) { \ + test::packet_helper<COND, Packet> h; \ + for (int i = 0; i < PacketSize; ++i) \ + ref[i] = Scalar(REFOP(data1[i], data1[i + PacketSize], \ + data1[i + 2 * PacketSize])); \ + h.store(data2, POP(h.load(data1), h.load(data1 + PacketSize), \ + h.load(data1 + 2 * PacketSize))); \ + VERIFY(test::areApprox(ref, data2, PacketSize) && #POP); \ +} + +// Specialize the runall struct in your test file by defining run(). 
+template< + typename Scalar, + typename PacketType, + bool IsComplex = NumTraits<Scalar>::IsComplex, + bool IsInteger = NumTraits<Scalar>::IsInteger> +struct runall; + +template< + typename Scalar, + typename PacketType = typename internal::packet_traits<Scalar>::type, + bool Vectorized = internal::packet_traits<Scalar>::Vectorizable, + bool HasHalf = !internal::is_same<typename internal::unpacket_traits<PacketType>::half,PacketType>::value > +struct runner; + +template<typename Scalar,typename PacketType> +struct runner<Scalar,PacketType,true,true> +{ + static void run() { + runall<Scalar,PacketType>::run(); + runner<Scalar,typename internal::unpacket_traits<PacketType>::half>::run(); + } +}; + +template<typename Scalar,typename PacketType> +struct runner<Scalar,PacketType,true,false> +{ + static void run() { + runall<Scalar,PacketType>::run(); + } +}; + +template<typename Scalar,typename PacketType> +struct runner<Scalar,PacketType,false,false> +{ + static void run() { + runall<Scalar,PacketType>::run(); + } +}; + +} +} diff --git a/test/pardiso_support.cpp b/test/pardiso_support.cpp index 67efad6d8..9c16ded5b 100644 --- a/test/pardiso_support.cpp +++ b/test/pardiso_support.cpp @@ -20,7 +20,7 @@ template<typename T> void test_pardiso_T() check_sparse_square_solving(pardiso_lu); } -void test_pardiso_support() +EIGEN_DECLARE_TEST(pardiso_support) { CALL_SUBTEST_1(test_pardiso_T<float>()); CALL_SUBTEST_2(test_pardiso_T<double>()); diff --git a/test/pastix_support.cpp b/test/pastix_support.cpp index b62f85739..9b64417c1 100644 --- a/test/pastix_support.cpp +++ b/test/pastix_support.cpp @@ -45,7 +45,7 @@ template<typename T> void test_pastix_T_LU() check_sparse_square_solving(pastix_lu); } -void test_pastix_support() +EIGEN_DECLARE_TEST(pastix_support) { CALL_SUBTEST_1(test_pastix_T<float>()); CALL_SUBTEST_2(test_pastix_T<double>()); diff --git a/test/permutationmatrices.cpp b/test/permutationmatrices.cpp index db1266579..d4b68b2d4 100644 --- 
a/test/permutationmatrices.cpp +++ b/test/permutationmatrices.cpp @@ -14,14 +14,15 @@ using namespace std; template<typename MatrixType> void permutationmatrices(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime, Options = MatrixType::Options }; typedef PermutationMatrix<Rows> LeftPermutationType; + typedef Transpositions<Rows> LeftTranspositionsType; typedef Matrix<int, Rows, 1> LeftPermutationVectorType; typedef Map<LeftPermutationType> MapLeftPerm; typedef PermutationMatrix<Cols> RightPermutationType; + typedef Transpositions<Cols> RightTranspositionsType; typedef Matrix<int, Cols, 1> RightPermutationVectorType; typedef Map<RightPermutationType> MapRightPerm; @@ -35,6 +36,8 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m) RightPermutationVectorType rv; randomPermutationVector(rv, cols); RightPermutationType rp(rv); + LeftTranspositionsType lt(lv); + RightTranspositionsType rt(rv); MatrixType m_permuted = MatrixType::Random(rows,cols); VERIFY_EVALUATION_COUNT(m_permuted = lp * m_original * rp, 1); // 1 temp for sub expression "lp * m_original" @@ -51,7 +54,11 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m) m_permuted = m_original; VERIFY_EVALUATION_COUNT(m_permuted = lp * m_permuted * rp, 1); VERIFY_IS_APPROX(m_permuted, lm*m_original*rm); - + + LeftPermutationType lpi; + lpi = lp.inverse(); + VERIFY_IS_APPROX(lpi*m_permuted,lp.inverse()*m_permuted); + VERIFY_IS_APPROX(lp.inverse()*m_permuted*rp.inverse(), m_original); VERIFY_IS_APPROX(lv.asPermutation().inverse()*m_permuted*rv.asPermutation().inverse(), m_original); VERIFY_IS_APPROX(MapLeftPerm(lv.data(),lv.size()).inverse()*m_permuted*MapRightPerm(rv.data(),rv.size()).inverse(), m_original); @@ -115,6 +122,24 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m) Matrix<Scalar, Cols, 
Cols> B = rp.transpose(); VERIFY_IS_APPROX(A, B.transpose()); } + + m_permuted = m_original; + lp = lt; + rp = rt; + VERIFY_EVALUATION_COUNT(m_permuted = lt * m_permuted * rt, 1); + VERIFY_IS_APPROX(m_permuted, lp*m_original*rp.transpose()); + + VERIFY_IS_APPROX(lt.inverse()*m_permuted*rt.inverse(), m_original); + + // Check inplace transpositions + m_permuted = m_original; + VERIFY_IS_APPROX(m_permuted = lt * m_permuted, lp * m_original); + m_permuted = m_original; + VERIFY_IS_APPROX(m_permuted = lt.inverse() * m_permuted, lp.inverse() * m_original); + m_permuted = m_original; + VERIFY_IS_APPROX(m_permuted = m_permuted * rt, m_original * rt); + m_permuted = m_original; + VERIFY_IS_APPROX(m_permuted = m_permuted * rt.inverse(), m_original * rt.inverse()); } template<typename T> @@ -136,12 +161,12 @@ void bug890() MapType(v1.data(),2,1,S(1,1)) = P * MapType(rhs.data(),2,1,S(1,1)); VERIFY_IS_APPROX(v1, (P * rhs).eval()); - + MapType(v1.data(),2,1,S(1,1)) = P.inverse() * MapType(rhs.data(),2,1,S(1,1)); VERIFY_IS_APPROX(v1, (P.inverse() * rhs).eval()); } -void test_permutationmatrices() +EIGEN_DECLARE_TEST(permutationmatrices) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( permutationmatrices(Matrix<float, 1, 1>()) ); @@ -149,8 +174,8 @@ void test_permutationmatrices() CALL_SUBTEST_3( permutationmatrices(Matrix<double,3,3,RowMajor>()) ); CALL_SUBTEST_4( permutationmatrices(Matrix4d()) ); CALL_SUBTEST_5( permutationmatrices(Matrix<double,40,60>()) ); - CALL_SUBTEST_6( permutationmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(20, 30)) ); - CALL_SUBTEST_7( permutationmatrices(MatrixXcf(15, 10)) ); + CALL_SUBTEST_6( permutationmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( permutationmatrices(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); } CALL_SUBTEST_5( bug890<double>() ); } diff --git 
a/test/prec_inverse_4x4.cpp b/test/prec_inverse_4x4.cpp index eb6ad18c9..86f057118 100644 --- a/test/prec_inverse_4x4.cpp +++ b/test/prec_inverse_4x4.cpp @@ -30,18 +30,17 @@ template<typename MatrixType> void inverse_general_4x4(int repeat) { using std::abs; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; double error_sum = 0., error_max = 0.; for(int i = 0; i < repeat; ++i) { MatrixType m; - RealScalar absdet; + bool is_invertible; do { m = MatrixType::Random(); - absdet = abs(m.determinant()); - } while(absdet < NumTraits<Scalar>::epsilon()); + is_invertible = Eigen::FullPivLU<MatrixType>(m).isInvertible(); + } while(!is_invertible); MatrixType inv = m.inverse(); - double error = double( (m*inv-MatrixType::Identity()).norm() * absdet / NumTraits<Scalar>::epsilon() ); + double error = double( (m*inv-MatrixType::Identity()).norm()); error_sum += error; error_max = (std::max)(error_max, error); } @@ -68,7 +67,7 @@ template<typename MatrixType> void inverse_general_4x4(int repeat) } } -void test_prec_inverse_4x4() +EIGEN_DECLARE_TEST(prec_inverse_4x4) { CALL_SUBTEST_1((inverse_permutation_4x4<Matrix4f>())); CALL_SUBTEST_1(( inverse_general_4x4<Matrix4f>(200000 * g_repeat) )); diff --git a/test/product.h b/test/product.h index 3b6511270..c6c78fbd8 100644 --- a/test/product.h +++ b/test/product.h @@ -111,6 +111,17 @@ template<typename MatrixType> void product(const MatrixType& m) vcres.noalias() -= m1.transpose() * v1; VERIFY_IS_APPROX(vcres, vc2 - m1.transpose() * v1); + // test scaled products + res = square; + res.noalias() = s1 * m1 * m2.transpose(); + VERIFY_IS_APPROX(res, ((s1*m1).eval() * m2.transpose())); + res = square; + res.noalias() += s1 * m1 * m2.transpose(); + VERIFY_IS_APPROX(res, square + ((s1*m1).eval() * m2.transpose())); + res = square; + res.noalias() -= s1 * m1 * m2.transpose(); + VERIFY_IS_APPROX(res, square - ((s1*m1).eval() * m2.transpose())); + // test d ?= a+b*c rules res.noalias() = square + 
m1 * m2.transpose(); VERIFY_IS_APPROX(res, square + m1 * m2.transpose()); @@ -216,6 +227,8 @@ template<typename MatrixType> void product(const MatrixType& m) // CwiseBinaryOp VERIFY_IS_APPROX(x = y + A*x, A*z); x = z; + VERIFY_IS_APPROX(x = y - A*x, A*(-z)); + x = z; // CwiseUnaryOp VERIFY_IS_APPROX(x = Scalar(1.)*(A*x), A*z); } @@ -228,4 +241,19 @@ template<typename MatrixType> void product(const MatrixType& m) VERIFY_IS_APPROX(square * (square*square).conjugate(), square * square.conjugate() * square.conjugate()); } + // destination with a non-default inner-stride + // see bug 1741 + if(!MatrixType::IsRowMajor) + { + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; + MatrixX buffer(2*rows,2*rows); + Map<RowSquareMatrixType,0,Stride<Dynamic,2> > map1(buffer.data(),rows,rows,Stride<Dynamic,2>(2*rows,2)); + buffer.setZero(); + VERIFY_IS_APPROX(map1 = m1 * m2.transpose(), (m1 * m2.transpose()).eval()); + buffer.setZero(); + VERIFY_IS_APPROX(map1.noalias() = m1 * m2.transpose(), (m1 * m2.transpose()).eval()); + buffer.setZero(); + VERIFY_IS_APPROX(map1.noalias() += m1 * m2.transpose(), (m1 * m2.transpose()).eval()); + } + } diff --git a/test/product_extra.cpp b/test/product_extra.cpp index e2b855bff..15c69896e 100644 --- a/test/product_extra.cpp +++ b/test/product_extra.cpp @@ -11,7 +11,6 @@ template<typename MatrixType> void product_extra(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, 1, Dynamic> RowVectorType; typedef Matrix<Scalar, Dynamic, 1> ColVectorType; @@ -94,6 +93,22 @@ template<typename MatrixType> void product_extra(const MatrixType& m) VERIFY_IS_APPROX(m1.col(j2).adjoint() * m1.block(0,j,m1.rows(),c), m1.col(j2).adjoint().eval() * m1.block(0,j,m1.rows(),c).eval()); VERIFY_IS_APPROX(m1.block(i,0,r,m1.cols()) * m1.row(i2).adjoint(), m1.block(i,0,r,m1.cols()).eval() * m1.row(i2).adjoint().eval()); + + // test negative strides + { + 
Map<MatrixType,Unaligned,Stride<Dynamic,Dynamic> > map1(&m1(rows-1,cols-1), rows, cols, Stride<Dynamic,Dynamic>(-m1.outerStride(),-1)); + Map<MatrixType,Unaligned,Stride<Dynamic,Dynamic> > map2(&m2(rows-1,cols-1), rows, cols, Stride<Dynamic,Dynamic>(-m2.outerStride(),-1)); + Map<RowVectorType,Unaligned,InnerStride<-1> > mapv1(&v1(v1.size()-1), v1.size(), InnerStride<-1>(-1)); + Map<ColVectorType,Unaligned,InnerStride<-1> > mapvc2(&vc2(vc2.size()-1), vc2.size(), InnerStride<-1>(-1)); + VERIFY_IS_APPROX(MatrixType(map1), m1.reverse()); + VERIFY_IS_APPROX(MatrixType(map2), m2.reverse()); + VERIFY_IS_APPROX(m3.noalias() = MatrixType(map1) * MatrixType(map2).adjoint(), m1.reverse() * m2.reverse().adjoint()); + VERIFY_IS_APPROX(m3.noalias() = map1 * map2.adjoint(), m1.reverse() * m2.reverse().adjoint()); + VERIFY_IS_APPROX(map1 * vc2, m1.reverse() * vc2); + VERIFY_IS_APPROX(m1 * mapvc2, m1 * mapvc2); + VERIFY_IS_APPROX(map1.adjoint() * v1.transpose(), m1.adjoint().reverse() * v1.transpose()); + VERIFY_IS_APPROX(m1.adjoint() * mapv1.transpose(), m1.adjoint() * v1.reverse().transpose()); + } // regression test MatrixType tmp = m1 * m1.adjoint() * s1; @@ -353,7 +368,7 @@ void bug_1308() VERIFY_IS_APPROX(r44.noalias() += Vector4d::Ones() * m44.col(0).transpose(), ones44); } -void test_product_extra() +EIGEN_DECLARE_TEST(product_extra) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( product_extra(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); diff --git a/test/product_large.cpp b/test/product_large.cpp index 845cd40ca..3d0204b5f 100644 --- a/test/product_large.cpp +++ b/test/product_large.cpp @@ -8,6 +8,7 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "product.h" +#include <Eigen/LU> template<typename T> void test_aliasing() @@ -30,19 +31,9 @@ void test_aliasing() x = z; } -void test_product_large() +template<int> +void product_large_regressions() { - for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( product(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); - CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); - CALL_SUBTEST_3( product(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); - CALL_SUBTEST_4( product(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) ); - CALL_SUBTEST_5( product(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); - - CALL_SUBTEST_1( test_aliasing<float>() ); - } - -#if defined EIGEN_TEST_PART_6 { // test a specific issue in DiagonalProduct int N = 1000000; @@ -95,7 +86,40 @@ void test_product_large() * (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))); VERIFY_IS_APPROX(B,C); } -#endif +} + +template<int> +void bug_1622() { + typedef Matrix<double, 2, -1, 0, 2, -1> Mat2X; + Mat2X x(2,2); x.setRandom(); + MatrixXd y(2,2); y.setRandom(); + const Mat2X K1 = x * y.inverse(); + const Matrix2d K2 = x * y.inverse(); + VERIFY_IS_APPROX(K1,K2); +} + +EIGEN_DECLARE_TEST(product_large) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( product(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,10), 
internal::random<int>(1,10))) ); + + CALL_SUBTEST_3( product(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_4( product(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_5( product(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + + CALL_SUBTEST_1( test_aliasing<float>() ); + + CALL_SUBTEST_6( bug_1622<1>() ); + + CALL_SUBTEST_7( product(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_8( product(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_9( product(Matrix<std::complex<float>,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_10( product(Matrix<std::complex<double>,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); + } + + CALL_SUBTEST_6( product_large_regressions<0>() ); // Regression test for bug 714: #if defined EIGEN_HAS_OPENMP diff --git a/test/product_mmtr.cpp b/test/product_mmtr.cpp index d3e24b012..8f8c5fe1f 100644 --- a/test/product_mmtr.cpp +++ b/test/product_mmtr.cpp @@ -82,9 +82,19 @@ template<typename Scalar> void mmtr(int size) ref2.template triangularView<Lower>() = ref1.template triangularView<Lower>(); matc.template triangularView<Lower>() = sqc * matc * sqc.adjoint(); VERIFY_IS_APPROX(matc, ref2); + + // destination with a non-default inner-stride + // see bug 1741 + { + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; + MatrixX buffer(2*size,2*size); + Map<MatrixColMaj,0,Stride<Dynamic,Dynamic> > map1(buffer.data(),size,size,Stride<Dynamic,Dynamic>(2*size,2)); + buffer.setZero(); + 
CHECK_MMTR(map1, Lower, = s*soc*sor.adjoint()); + } } -void test_product_mmtr() +EIGEN_DECLARE_TEST(product_mmtr) { for(int i = 0; i < g_repeat ; i++) { diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp index 30592b79e..20cb7c080 100644 --- a/test/product_notemporary.cpp +++ b/test/product_notemporary.cpp @@ -11,11 +11,41 @@ #include "main.h" +template<typename Dst, typename Lhs, typename Rhs> +void check_scalar_multiple3(Dst &dst, const Lhs& A, const Rhs& B) +{ + VERIFY_EVALUATION_COUNT( (dst.noalias() = A * B), 0); + VERIFY_IS_APPROX( dst, (A.eval() * B.eval()).eval() ); + VERIFY_EVALUATION_COUNT( (dst.noalias() += A * B), 0); + VERIFY_IS_APPROX( dst, 2*(A.eval() * B.eval()).eval() ); + VERIFY_EVALUATION_COUNT( (dst.noalias() -= A * B), 0); + VERIFY_IS_APPROX( dst, (A.eval() * B.eval()).eval() ); +} + +template<typename Dst, typename Lhs, typename Rhs, typename S2> +void check_scalar_multiple2(Dst &dst, const Lhs& A, const Rhs& B, S2 s2) +{ + CALL_SUBTEST( check_scalar_multiple3(dst, A, B) ); + CALL_SUBTEST( check_scalar_multiple3(dst, A, -B) ); + CALL_SUBTEST( check_scalar_multiple3(dst, A, s2*B) ); + CALL_SUBTEST( check_scalar_multiple3(dst, A, B*s2) ); + CALL_SUBTEST( check_scalar_multiple3(dst, A, (B*s2).conjugate()) ); +} + +template<typename Dst, typename Lhs, typename Rhs, typename S1, typename S2> +void check_scalar_multiple1(Dst &dst, const Lhs& A, const Rhs& B, S1 s1, S2 s2) +{ + CALL_SUBTEST( check_scalar_multiple2(dst, A, B, s2) ); + CALL_SUBTEST( check_scalar_multiple2(dst, -A, B, s2) ); + CALL_SUBTEST( check_scalar_multiple2(dst, s1*A, B, s2) ); + CALL_SUBTEST( check_scalar_multiple2(dst, A*s1, B, s2) ); + CALL_SUBTEST( check_scalar_multiple2(dst, (A*s1).conjugate(), B, s2) ); +} + template<typename MatrixType> void product_notemporary(const MatrixType& m) { /* This test checks the number of temporaries created * during the evaluation of a complex expression */ - typedef typename MatrixType::Index Index; typedef typename 
MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix<Scalar, 1, Dynamic> RowVectorType; @@ -106,7 +136,9 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m) VERIFY_EVALUATION_COUNT( m3.noalias() = m1.block(r0,r0,r1,r1).template triangularView<UnitUpper>() * m2.block(r0,c0,r1,c1), 1); // Zero temporaries for lazy products ... + m3.setRandom(rows,cols); VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose().lazyProduct(m3)).diagonal().sum(), 0 ); + VERIFY_EVALUATION_COUNT( m3.noalias() = m1.conjugate().lazyProduct(m2.conjugate()), 0); // ... and even no temporary for even deeply (>=2) nested products VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose() * m3).diagonal().sum(), 0 ); @@ -128,11 +160,19 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m) VERIFY_EVALUATION_COUNT( cvres.noalias() = (rm3+rm3) * (m1*cv1), 1 ); // Check outer products + #ifdef EIGEN_ALLOCA + bool temp_via_alloca = m3.rows()*sizeof(Scalar) <= EIGEN_STACK_ALLOCATION_LIMIT; + #else + bool temp_via_alloca = false; + #endif m3 = cv1 * rv1; VERIFY_EVALUATION_COUNT( m3.noalias() = cv1 * rv1, 0 ); - VERIFY_EVALUATION_COUNT( m3.noalias() = (cv1+cv1) * (rv1+rv1), 1 ); + VERIFY_EVALUATION_COUNT( m3.noalias() = (cv1+cv1) * (rv1+rv1), temp_via_alloca ? 0 : 1 ); VERIFY_EVALUATION_COUNT( m3.noalias() = (m1*cv1) * (rv1), 1 ); VERIFY_EVALUATION_COUNT( m3.noalias() += (m1*cv1) * (rv1), 1 ); + rm3 = cv1 * rv1; + VERIFY_EVALUATION_COUNT( rm3.noalias() = cv1 * rv1, 0 ); + VERIFY_EVALUATION_COUNT( rm3.noalias() = (cv1+cv1) * (rv1+rv1), temp_via_alloca ? 
0 : 1 ); VERIFY_EVALUATION_COUNT( rm3.noalias() = (cv1) * (rv1 * m1), 1 ); VERIFY_EVALUATION_COUNT( rm3.noalias() -= (cv1) * (rv1 * m1), 1 ); VERIFY_EVALUATION_COUNT( rm3.noalias() = (m1*cv1) * (rv1 * m1), 2 ); @@ -141,9 +181,18 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m) // Check nested products VERIFY_EVALUATION_COUNT( cvres.noalias() = m1.adjoint() * m1 * cv1, 1 ); VERIFY_EVALUATION_COUNT( rvres.noalias() = rv1 * (m1 * m2.adjoint()), 1 ); + + // exhaustively check all scalar multiple combinations: + { + // Generic path: + check_scalar_multiple1(m3, m1, m2, s1, s2); + // Force fall back to coeff-based: + typename ColMajorMatrixType::BlockXpr m3_blck = m3.block(r0,r0,1,1); + check_scalar_multiple1(m3_blck, m1.block(r0,c0,1,1), m2.block(c0,r0,1,1), s1, s2); + } } -void test_product_notemporary() +EIGEN_DECLARE_TEST(product_notemporary) { int s; for(int i = 0; i < g_repeat; i++) { diff --git a/test/product_selfadjoint.cpp b/test/product_selfadjoint.cpp index 3d768aa7e..bdccd0491 100644 --- a/test/product_selfadjoint.cpp +++ b/test/product_selfadjoint.cpp @@ -11,7 +11,6 @@ template<typename MatrixType> void product_selfadjoint(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; typedef Matrix<Scalar, 1, MatrixType::RowsAtCompileTime> RowVectorType; @@ -60,7 +59,7 @@ template<typename MatrixType> void product_selfadjoint(const MatrixType& m) } } -void test_product_selfadjoint() +EIGEN_DECLARE_TEST(product_selfadjoint) { int s = 0; for(int i = 0; i < g_repeat ; i++) { diff --git a/test/product_small.cpp b/test/product_small.cpp index fdfdd9f6c..1d6df6e58 100644 --- a/test/product_small.cpp +++ b/test/product_small.cpp @@ -56,6 +56,30 @@ test_lazy_single(int rows, int cols, int depth) VERIFY_IS_APPROX(C+=A.lazyProduct(B), ref_prod(D,A,B)); } +void test_dynamic_bool() +{ + int rows = 
internal::random<int>(1,64); + int cols = internal::random<int>(1,64); + int depth = internal::random<int>(1,65); + + typedef Matrix<bool,Dynamic,Dynamic> MatrixX; + MatrixX A(rows,depth); A.setRandom(); + MatrixX B(depth,cols); B.setRandom(); + MatrixX C(rows,cols); C.setRandom(); + MatrixX D(C); + for(Index i=0;i<C.rows();++i) + for(Index j=0;j<C.cols();++j) + for(Index k=0;k<A.cols();++k) + D.coeffRef(i,j) |= A.coeff(i,k) & B.coeff(k,j); + C += A * B; + VERIFY_IS_EQUAL(C, D); + + MatrixX E = B.transpose(); + for(Index i=0;i<B.rows();++i) + for(Index j=0;j<B.cols();++j) + VERIFY_IS_EQUAL(B(i,j), E(j,i)); +} + template<typename T, int Rows, int Cols, int Depth, int OC, int OA, int OB> typename internal::enable_if< ( (Rows ==1&&Depth!=1&&OA==ColMajor) || (Depth==1&&Rows !=1&&OA==RowMajor) @@ -78,7 +102,7 @@ void test_lazy_all_layout(int rows=Rows, int cols=Cols, int depth=Depth) CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,RowMajor,ColMajor,RowMajor>(rows,cols,depth) )); CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,ColMajor,RowMajor,RowMajor>(rows,cols,depth) )); CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,RowMajor,RowMajor,RowMajor>(rows,cols,depth) )); -} +} template<typename T> void test_lazy_l1() @@ -228,7 +252,37 @@ void bug_1311() VERIFY_IS_APPROX(res, A*b); } -void test_product_small() +template<int> +void product_small_regressions() +{ + { + // test compilation of (outer_product) * vector + Vector3f v = Vector3f::Random(); + VERIFY_IS_APPROX( (v * v.transpose()) * v, (v * v.transpose()).eval() * v); + } + + { + // regression test for pull-request #93 + Eigen::Matrix<double, 1, 1> A; A.setRandom(); + Eigen::Matrix<double, 18, 1> B; B.setRandom(); + Eigen::Matrix<double, 1, 18> C; C.setRandom(); + VERIFY_IS_APPROX(B * A.inverse(), B * A.inverse()[0]); + VERIFY_IS_APPROX(A.inverse() * C, A.inverse()[0] * C); + } + + { + Eigen::Matrix<double, 10, 10> A, B, C; + A.setRandom(); + C = A; + for(int k=0; k<79; ++k) + C = C * A; + B.noalias() = 
(((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))) + * (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))); + VERIFY_IS_APPROX(B,C); + } +} + +EIGEN_DECLARE_TEST(product_small) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( product(Matrix<float, 3, 2>()) ); @@ -261,33 +315,9 @@ void test_product_small() CALL_SUBTEST_6( bug_1311<3>() ); CALL_SUBTEST_6( bug_1311<5>() ); - } -#ifdef EIGEN_TEST_PART_6 - { - // test compilation of (outer_product) * vector - Vector3f v = Vector3f::Random(); - VERIFY_IS_APPROX( (v * v.transpose()) * v, (v * v.transpose()).eval() * v); - } - - { - // regression test for pull-request #93 - Eigen::Matrix<double, 1, 1> A; A.setRandom(); - Eigen::Matrix<double, 18, 1> B; B.setRandom(); - Eigen::Matrix<double, 1, 18> C; C.setRandom(); - VERIFY_IS_APPROX(B * A.inverse(), B * A.inverse()[0]); - VERIFY_IS_APPROX(A.inverse() * C, A.inverse()[0] * C); + CALL_SUBTEST_9( test_dynamic_bool() ); } - { - Eigen::Matrix<double, 10, 10> A, B, C; - A.setRandom(); - C = A; - for(int k=0; k<79; ++k) - C = C * A; - B.noalias() = (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))) - * (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))); - VERIFY_IS_APPROX(B,C); - } -#endif + CALL_SUBTEST_6( product_small_regressions<0>() ); } diff --git a/test/product_symm.cpp b/test/product_symm.cpp index 8c44383f9..ea8d4d5cf 100644 --- a/test/product_symm.cpp +++ b/test/product_symm.cpp @@ -16,7 +16,6 @@ template<typename Scalar, int Size, int OtherSize> void symm(int size = Size, in typedef Matrix<Scalar, OtherSize, Size> Rhs2; enum { order = OtherSize==1 ? 
0 : RowMajor }; typedef Matrix<Scalar, Size, OtherSize,order> Rhs3; - typedef typename MatrixType::Index Index; Index rows = size; Index cols = size; @@ -76,12 +75,12 @@ template<typename Scalar, int Size, int OtherSize> void symm(int size = Size, in rhs13 = (s1*m1.adjoint()) * (s2*rhs2.adjoint())); // test row major = <...> - m2 = m1.template triangularView<Lower>(); rhs12.setRandom(); rhs13 = rhs12; - VERIFY_IS_APPROX(rhs12 -= (s1*m2).template selfadjointView<Lower>() * (s2*rhs3), + m2 = m1.template triangularView<Lower>(); rhs32.setRandom(); rhs13 = rhs32; + VERIFY_IS_APPROX(rhs32.noalias() -= (s1*m2).template selfadjointView<Lower>() * (s2*rhs3), rhs13 -= (s1*m1) * (s2 * rhs3)); m2 = m1.template triangularView<Upper>(); - VERIFY_IS_APPROX(rhs12 = (s1*m2.adjoint()).template selfadjointView<Lower>() * (s2*rhs3).conjugate(), + VERIFY_IS_APPROX(rhs32.noalias() = (s1*m2.adjoint()).template selfadjointView<Lower>() * (s2*rhs3).conjugate(), rhs13 = (s1*m1.adjoint()) * (s2*rhs3).conjugate()); @@ -93,9 +92,23 @@ template<typename Scalar, int Size, int OtherSize> void symm(int size = Size, in VERIFY_IS_APPROX(rhs22 = (rhs2) * (m2).template selfadjointView<Lower>(), rhs23 = (rhs2) * (m1)); VERIFY_IS_APPROX(rhs22 = (s2*rhs2) * (s1*m2).template selfadjointView<Lower>(), rhs23 = (s2*rhs2) * (s1*m1)); + // destination with a non-default inner-stride + // see bug 1741 + { + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; + MatrixX buffer(2*cols,2*othersize); + Map<Rhs1,0,Stride<Dynamic,2> > map1(buffer.data(),cols,othersize,Stride<Dynamic,2>(2*rows,2)); + buffer.setZero(); + VERIFY_IS_APPROX( map1.noalias() = (s1*m2).template selfadjointView<Lower>() * (s2*rhs1), + rhs13 = (s1*m1) * (s2*rhs1)); + + Map<Rhs2,0,Stride<Dynamic,2> > map2(buffer.data(),rhs22.rows(),rhs22.cols(),Stride<Dynamic,2>(2*rhs22.outerStride(),2)); + buffer.setZero(); + VERIFY_IS_APPROX(map2 = (rhs2) * (m2).template selfadjointView<Lower>(), rhs23 = (rhs2) * (m1)); + } } -void test_product_symm() 
+EIGEN_DECLARE_TEST(product_symm) { for(int i = 0; i < g_repeat ; i++) { diff --git a/test/product_syrk.cpp b/test/product_syrk.cpp index e10f0f2f2..8becd37fc 100644 --- a/test/product_syrk.cpp +++ b/test/product_syrk.cpp @@ -11,7 +11,6 @@ template<typename MatrixType> void syrk(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime, RowMajor> RMatrixType; typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, Dynamic> Rhs1; @@ -116,9 +115,20 @@ template<typename MatrixType> void syrk(const MatrixType& m) m2.setZero(); VERIFY_IS_APPROX((m2.template selfadjointView<Upper>().rankUpdate(m1.row(c).adjoint(),s1)._expression()), ((s1 * m1.row(c).adjoint() * m1.row(c).adjoint().adjoint()).eval().template triangularView<Upper>().toDenseMatrix())); + + // destination with a non-default inner-stride + // see bug 1741 + { + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; + MatrixX buffer(2*rows,2*cols); + Map<MatrixType,0,Stride<Dynamic,2> > map1(buffer.data(),rows,cols,Stride<Dynamic,2>(2*rows,2)); + buffer.setZero(); + VERIFY_IS_APPROX((map1.template selfadjointView<Lower>().rankUpdate(rhs2,s1)._expression()), + ((s1 * rhs2 * rhs2.adjoint()).eval().template triangularView<Lower>().toDenseMatrix())); + } } -void test_product_syrk() +EIGEN_DECLARE_TEST(product_syrk) { for(int i = 0; i < g_repeat ; i++) { diff --git a/test/product_trmm.cpp b/test/product_trmm.cpp index 12e554410..2bb4b9e47 100644 --- a/test/product_trmm.cpp +++ b/test/product_trmm.cpp @@ -29,7 +29,7 @@ void trmm(int rows=get_random_size<Scalar>(), typedef Matrix<Scalar,Dynamic,OtherCols,OtherCols==1?ColMajor:ResOrder> ResXS; typedef Matrix<Scalar,OtherCols,Dynamic,OtherCols==1?RowMajor:ResOrder> ResSX; - TriMatrix mat(rows,cols), tri(rows,cols), triTr(cols,rows); + TriMatrix mat(rows,cols), tri(rows,cols), triTr(cols,rows), s1tri(rows,cols), s1triTr(cols,rows); OnTheRight 
ge_right(cols,otherCols); OnTheLeft ge_left(otherCols,rows); @@ -42,6 +42,8 @@ void trmm(int rows=get_random_size<Scalar>(), mat.setRandom(); tri = mat.template triangularView<Mode>(); triTr = mat.transpose().template triangularView<Mode>(); + s1tri = (s1*mat).template triangularView<Mode>(); + s1triTr = (s1*mat).transpose().template triangularView<Mode>(); ge_right.setRandom(); ge_left.setRandom(); @@ -51,21 +53,41 @@ void trmm(int rows=get_random_size<Scalar>(), VERIFY_IS_APPROX( ge_xs.noalias() = mat.template triangularView<Mode>() * ge_right, tri * ge_right); VERIFY_IS_APPROX( ge_sx.noalias() = ge_left * mat.template triangularView<Mode>(), ge_left * tri); - VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.transpose()), s1*triTr.conjugate() * (s2*ge_left.transpose())); - VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.transpose() * mat.adjoint().template triangularView<Mode>(), ge_right.transpose() * triTr.conjugate()); + if((Mode&UnitDiag)==0) + VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.transpose()), s1*triTr.conjugate() * (s2*ge_left.transpose())); - VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()), s1*triTr.conjugate() * (s2*ge_left.adjoint())); - VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.adjoint() * mat.adjoint().template triangularView<Mode>(), ge_right.adjoint() * triTr.conjugate()); + VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.transpose()).template triangularView<Mode>() * (s2*ge_left.transpose()), s1triTr * (s2*ge_left.transpose())); + VERIFY_IS_APPROX( ge_sx.noalias() = (s2*ge_left) * (s1*mat).template triangularView<Mode>(), (s2*ge_left)*s1tri); + VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.transpose() * mat.adjoint().template triangularView<Mode>(), ge_right.transpose() * triTr.conjugate()); + VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.adjoint() * mat.adjoint().template 
triangularView<Mode>(), ge_right.adjoint() * triTr.conjugate()); + + ge_xs_save = ge_xs; + if((Mode&UnitDiag)==0) + VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()) ); ge_xs_save = ge_xs; - VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()) ); + VERIFY_IS_APPROX( (ge_xs_save + s1triTr * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.transpose()).template triangularView<Mode>() * (s2*ge_left.adjoint()) ); ge_sx.setRandom(); ge_sx_save = ge_sx; - VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView<Mode>()).eval()); - - VERIFY_IS_APPROX( ge_xs = (s1*mat).adjoint().template triangularView<Mode>() * ge_left.adjoint(), numext::conj(s1) * triTr.conjugate() * ge_left.adjoint()); + if((Mode&UnitDiag)==0) + VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView<Mode>()).eval()); + if((Mode&UnitDiag)==0) + VERIFY_IS_APPROX( ge_xs = (s1*mat).adjoint().template triangularView<Mode>() * ge_left.adjoint(), numext::conj(s1) * triTr.conjugate() * ge_left.adjoint()); + VERIFY_IS_APPROX( ge_xs = (s1*mat).transpose().template triangularView<Mode>() * ge_left.adjoint(), s1triTr * ge_left.adjoint()); + // TODO check with sub-matrix expressions ? 
+ + // destination with a non-default inner-stride + // see bug 1741 + { + VERIFY_IS_APPROX( ge_xs.noalias() = mat.template triangularView<Mode>() * ge_right, tri * ge_right); + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; + MatrixX buffer(2*ge_xs.rows(),2*ge_xs.cols()); + Map<ResXS,0,Stride<Dynamic,2> > map1(buffer.data(),ge_xs.rows(),ge_xs.cols(),Stride<Dynamic,2>(2*ge_xs.outerStride(),2)); + buffer.setZero(); + VERIFY_IS_APPROX( map1.noalias() = mat.template triangularView<Mode>() * ge_right, tri * ge_right); + } } template<typename Scalar, int Mode, int TriOrder> @@ -103,7 +125,7 @@ void trmm(int rows=get_random_size<Scalar>(), int cols=get_random_size<Scalar>() CALL_ALL_ORDERS(EIGEN_CAT(3,NB),SCALAR,StrictlyLower) -void test_product_trmm() +EIGEN_DECLARE_TEST(product_trmm) { for(int i = 0; i < g_repeat ; i++) { diff --git a/test/product_trmv.cpp b/test/product_trmv.cpp index 57a202afc..5eb1b5ac0 100644 --- a/test/product_trmv.cpp +++ b/test/product_trmv.cpp @@ -11,7 +11,6 @@ template<typename MatrixType> void trmv(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; @@ -71,7 +70,7 @@ template<typename MatrixType> void trmv(const MatrixType& m) // TODO check with sub-matrices } -void test_product_trmv() +EIGEN_DECLARE_TEST(product_trmv) { int s = 0; for(int i = 0; i < g_repeat ; i++) { diff --git a/test/product_trsolve.cpp b/test/product_trsolve.cpp index 4b97fa9d6..c59748c5b 100644 --- a/test/product_trsolve.cpp +++ b/test/product_trsolve.cpp @@ -71,9 +71,35 @@ template<typename Scalar,int Size, int Cols> void trsolve(int size=Size,int cols int c = internal::random<int>(0,cols-1); VERIFY_TRSM(rmLhs.template triangularView<Lower>(), rmRhs.col(c)); VERIFY_TRSM(cmLhs.template triangularView<Lower>(), rmRhs.col(c)); + + // destination with a non-default inner-stride + // see bug 1741 
+ { + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; + MatrixX buffer(2*cmRhs.rows(),2*cmRhs.cols()); + Map<Matrix<Scalar,Size,Cols,colmajor>,0,Stride<Dynamic,2> > map1(buffer.data(),cmRhs.rows(),cmRhs.cols(),Stride<Dynamic,2>(2*cmRhs.outerStride(),2)); + Map<Matrix<Scalar,Size,Cols,rowmajor>,0,Stride<Dynamic,2> > map2(buffer.data(),rmRhs.rows(),rmRhs.cols(),Stride<Dynamic,2>(2*rmRhs.outerStride(),2)); + buffer.setZero(); + VERIFY_TRSM(cmLhs.conjugate().template triangularView<Lower>(), map1); + buffer.setZero(); + VERIFY_TRSM(cmLhs .template triangularView<Lower>(), map2); + } + + if(Size==Dynamic) + { + cmLhs.resize(0,0); + cmRhs.resize(0,cmRhs.cols()); + Matrix<Scalar,Size,Cols,colmajor> res = cmLhs.template triangularView<Lower>().solve(cmRhs); + VERIFY_IS_EQUAL(res.rows(),0); + VERIFY_IS_EQUAL(res.cols(),cmRhs.cols()); + res = cmRhs; + cmLhs.template triangularView<Lower>().solveInPlace(res); + VERIFY_IS_EQUAL(res.rows(),0); + VERIFY_IS_EQUAL(res.cols(),cmRhs.cols()); + } } -void test_product_trsolve() +EIGEN_DECLARE_TEST(product_trsolve) { for(int i = 0; i < g_repeat ; i++) { diff --git a/test/qr.cpp b/test/qr.cpp index dfcc1e8f9..c38e3439b 100644 --- a/test/qr.cpp +++ b/test/qr.cpp @@ -9,11 +9,10 @@ #include "main.h" #include <Eigen/QR> +#include "solverbase.h" template<typename MatrixType> void qr(const MatrixType& m) { - typedef typename MatrixType::Index Index; - Index rows = m.rows(); Index cols = m.cols(); @@ -43,11 +42,7 @@ template<typename MatrixType, int Cols2> void qr_fixedsize() VERIFY_IS_APPROX(m1, qr.householderQ() * r); - Matrix<Scalar,Cols,Cols2> m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2); - Matrix<Scalar,Rows,Cols2> m3 = m1*m2; - m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2); - m2 = qr.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); + check_solverbase<Matrix<Scalar,Cols,Cols2>, Matrix<Scalar,Rows,Cols2> >(m1, qr, Rows, Cols, Cols2); } template<typename MatrixType> void qr_invertible() @@ -59,6 +54,8 @@ template<typename MatrixType> 
void qr_invertible() typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef typename MatrixType::Scalar Scalar; + STATIC_CHECK(( internal::is_same<typename HouseholderQR<MatrixType>::StorageIndex,int>::value )); + int size = internal::random<int>(10,50); MatrixType m1(size, size), m2(size, size), m3(size, size); @@ -72,9 +69,8 @@ template<typename MatrixType> void qr_invertible() } HouseholderQR<MatrixType> qr(m1); - m3 = MatrixType::Random(size,size); - m2 = qr.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); + + check_solverbase<MatrixType, MatrixType>(m1, qr, size, size, size); // now construct a matrix with prescribed determinant m1.setZero(); @@ -85,7 +81,7 @@ template<typename MatrixType> void qr_invertible() qr.compute(m1); VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant()); // This test is tricky if the determinant becomes too small. - // Since we generate random numbers with magnitude rrange [0,1], the average determinant is 0.5^size + // Since we generate random numbers with magnitude range [0,1], the average determinant is 0.5^size VERIFY_IS_MUCH_SMALLER_THAN( abs(absdet-qr.absDeterminant()), numext::maxi(RealScalar(pow(0.5,size)),numext::maxi<RealScalar>(abs(absdet),abs(qr.absDeterminant()))) ); } @@ -97,12 +93,14 @@ template<typename MatrixType> void qr_verify_assert() HouseholderQR<MatrixType> qr; VERIFY_RAISES_ASSERT(qr.matrixQR()) VERIFY_RAISES_ASSERT(qr.solve(tmp)) + VERIFY_RAISES_ASSERT(qr.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(qr.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(qr.householderQ()) VERIFY_RAISES_ASSERT(qr.absDeterminant()) VERIFY_RAISES_ASSERT(qr.logAbsDeterminant()) } -void test_qr() +EIGEN_DECLARE_TEST(qr) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( qr(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp index 26ed27f5c..06f16438f 100644 --- a/test/qr_colpivoting.cpp +++ 
b/test/qr_colpivoting.cpp @@ -11,10 +11,11 @@ #include "main.h" #include <Eigen/QR> #include <Eigen/SVD> +#include "solverbase.h" template <typename MatrixType> void cod() { - typedef typename MatrixType::Index Index; + STATIC_CHECK(( internal::is_same<typename CompleteOrthogonalDecomposition<MatrixType>::StorageIndex,int>::value )); Index rows = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE); Index cols = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE); @@ -48,12 +49,12 @@ void cod() { MatrixType c = q * t * z * cod.colsPermutation().inverse(); VERIFY_IS_APPROX(matrix, c); + check_solverbase<MatrixType, MatrixType>(matrix, cod, rows, cols, cols2); + + // Verify that we get the same minimum-norm solution as the SVD. MatrixType exact_solution = MatrixType::Random(cols, cols2); MatrixType rhs = matrix * exact_solution; MatrixType cod_solution = cod.solve(rhs); - VERIFY_IS_APPROX(rhs, matrix * cod_solution); - - // Verify that we get the same minimum-norm solution as the SVD. JacobiSVD<MatrixType> svd(matrix, ComputeThinU | ComputeThinV); MatrixType svd_solution = svd.solve(rhs); VERIFY_IS_APPROX(cod_solution, svd_solution); @@ -69,32 +70,37 @@ void cod_fixedsize() { Cols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; + typedef CompleteOrthogonalDecomposition<Matrix<Scalar, Rows, Cols> > COD; int rank = internal::random<int>(1, (std::min)(int(Rows), int(Cols)) - 1); Matrix<Scalar, Rows, Cols> matrix; createRandomPIMatrixOfRank(rank, Rows, Cols, matrix); - CompleteOrthogonalDecomposition<Matrix<Scalar, Rows, Cols> > cod(matrix); + COD cod(matrix); VERIFY(rank == cod.rank()); VERIFY(Cols - cod.rank() == cod.dimensionOfKernel()); VERIFY(cod.isInjective() == (rank == Rows)); VERIFY(cod.isSurjective() == (rank == Cols)); VERIFY(cod.isInvertible() == (cod.isInjective() && cod.isSurjective())); + check_solverbase<Matrix<Scalar, Cols, Cols2>, Matrix<Scalar, Rows, Cols2> >(matrix, cod, Rows, Cols, Cols2); + + // Verify that we get the same 
minimum-norm solution as the SVD. Matrix<Scalar, Cols, Cols2> exact_solution; exact_solution.setRandom(Cols, Cols2); Matrix<Scalar, Rows, Cols2> rhs = matrix * exact_solution; Matrix<Scalar, Cols, Cols2> cod_solution = cod.solve(rhs); - VERIFY_IS_APPROX(rhs, matrix * cod_solution); - - // Verify that we get the same minimum-norm solution as the SVD. JacobiSVD<MatrixType> svd(matrix, ComputeFullU | ComputeFullV); Matrix<Scalar, Cols, Cols2> svd_solution = svd.solve(rhs); VERIFY_IS_APPROX(cod_solution, svd_solution); + + typename Inverse<COD>::PlainObject pinv = cod.pseudoInverse(); + VERIFY_IS_APPROX(cod_solution, pinv * rhs); } template<typename MatrixType> void qr() { using std::sqrt; - typedef typename MatrixType::Index Index; + + STATIC_CHECK(( internal::is_same<typename ColPivHouseholderQR<MatrixType>::StorageIndex,int>::value )); Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols2 = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE); Index rank = internal::random<Index>(1, (std::min)(rows, cols)-1); @@ -136,13 +142,10 @@ template<typename MatrixType> void qr() VERIFY_IS_APPROX_OR_LESS_THAN(y, x); } - MatrixType m2 = MatrixType::Random(cols,cols2); - MatrixType m3 = m1*m2; - m2 = MatrixType::Random(cols,cols2); - m2 = qr.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); + check_solverbase<MatrixType, MatrixType>(m1, qr, rows, cols, cols2); { + MatrixType m2, m3; Index size = rows; do { m1 = MatrixType::Random(size,size); @@ -176,11 +179,8 @@ template<typename MatrixType, int Cols2> void qr_fixedsize() Matrix<Scalar,Rows,Cols> c = qr.householderQ() * r * qr.colsPermutation().inverse(); VERIFY_IS_APPROX(m1, c); - Matrix<Scalar,Cols,Cols2> m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2); - Matrix<Scalar,Rows,Cols2> m3 = m1*m2; - m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2); - m2 = qr.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); + check_solverbase<Matrix<Scalar,Cols,Cols2>, Matrix<Scalar,Rows,Cols2> 
>(m1, qr, Rows, Cols, Cols2); + // Verify that the absolute value of the diagonal elements in R are // non-increasing until they reache the singularity threshold. RealScalar threshold = @@ -211,7 +211,6 @@ template<typename MatrixType> void qr_kahan_matrix() { using std::sqrt; using std::abs; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -268,9 +267,8 @@ template<typename MatrixType> void qr_invertible() } ColPivHouseholderQR<MatrixType> qr(m1); - m3 = MatrixType::Random(size,size); - m2 = qr.solve(m3); - //VERIFY_IS_APPROX(m3, m1*m2); + + check_solverbase<MatrixType, MatrixType>(m1, qr, size, size, size); // now construct a matrix with prescribed determinant m1.setZero(); @@ -290,6 +288,8 @@ template<typename MatrixType> void qr_verify_assert() ColPivHouseholderQR<MatrixType> qr; VERIFY_RAISES_ASSERT(qr.matrixQR()) VERIFY_RAISES_ASSERT(qr.solve(tmp)) + VERIFY_RAISES_ASSERT(qr.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(qr.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(qr.householderQ()) VERIFY_RAISES_ASSERT(qr.dimensionOfKernel()) VERIFY_RAISES_ASSERT(qr.isInjective()) @@ -300,7 +300,26 @@ template<typename MatrixType> void qr_verify_assert() VERIFY_RAISES_ASSERT(qr.logAbsDeterminant()) } -void test_qr_colpivoting() +template<typename MatrixType> void cod_verify_assert() +{ + MatrixType tmp; + + CompleteOrthogonalDecomposition<MatrixType> cod; + VERIFY_RAISES_ASSERT(cod.matrixQTZ()) + VERIFY_RAISES_ASSERT(cod.solve(tmp)) + VERIFY_RAISES_ASSERT(cod.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(cod.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(cod.householderQ()) + VERIFY_RAISES_ASSERT(cod.dimensionOfKernel()) + VERIFY_RAISES_ASSERT(cod.isInjective()) + VERIFY_RAISES_ASSERT(cod.isSurjective()) + VERIFY_RAISES_ASSERT(cod.isInvertible()) + VERIFY_RAISES_ASSERT(cod.pseudoInverse()) + VERIFY_RAISES_ASSERT(cod.absDeterminant()) + VERIFY_RAISES_ASSERT(cod.logAbsDeterminant()) 
+} + +EIGEN_DECLARE_TEST(qr_colpivoting) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( qr<MatrixXf>() ); @@ -334,6 +353,13 @@ void test_qr_colpivoting() CALL_SUBTEST_6(qr_verify_assert<MatrixXcf>()); CALL_SUBTEST_3(qr_verify_assert<MatrixXcd>()); + CALL_SUBTEST_7(cod_verify_assert<Matrix3f>()); + CALL_SUBTEST_8(cod_verify_assert<Matrix3d>()); + CALL_SUBTEST_1(cod_verify_assert<MatrixXf>()); + CALL_SUBTEST_2(cod_verify_assert<MatrixXd>()); + CALL_SUBTEST_6(cod_verify_assert<MatrixXcf>()); + CALL_SUBTEST_3(cod_verify_assert<MatrixXcd>()); + // Test problem size constructors CALL_SUBTEST_9(ColPivHouseholderQR<MatrixXf>(10, 20)); diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp index 70e89c198..f2d8cb33e 100644 --- a/test/qr_fullpivoting.cpp +++ b/test/qr_fullpivoting.cpp @@ -10,16 +10,18 @@ #include "main.h" #include <Eigen/QR> +#include "solverbase.h" template<typename MatrixType> void qr() { - typedef typename MatrixType::Index Index; + STATIC_CHECK(( internal::is_same<typename FullPivHouseholderQR<MatrixType>::StorageIndex,int>::value )); + static const int Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime; Index max_size = EIGEN_TEST_MAX_SIZE; Index min_size = numext::maxi(1,EIGEN_TEST_MAX_SIZE/10); - Index rows = internal::random<Index>(min_size,max_size), - cols = internal::random<Index>(min_size,max_size), - cols2 = internal::random<Index>(min_size,max_size), + Index rows = Rows == Dynamic ? internal::random<Index>(min_size,max_size) : Rows, + cols = Cols == Dynamic ? internal::random<Index>(min_size,max_size) : Cols, + cols2 = Cols == Dynamic ? 
internal::random<Index>(min_size,max_size) : Cols, rank = internal::random<Index>(1, (std::min)(rows, cols)-1); typedef typename MatrixType::Scalar Scalar; @@ -49,13 +51,10 @@ template<typename MatrixType> void qr() MatrixType tmp; VERIFY_IS_APPROX(tmp.noalias() = qr.matrixQ() * r, (qr.matrixQ() * r).eval()); - MatrixType m2 = MatrixType::Random(cols,cols2); - MatrixType m3 = m1*m2; - m2 = MatrixType::Random(cols,cols2); - m2 = qr.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); + check_solverbase<MatrixType, MatrixType>(m1, qr, rows, cols, cols2); { + MatrixType m2, m3; Index size = rows; do { m1 = MatrixType::Random(size,size); @@ -94,9 +93,7 @@ template<typename MatrixType> void qr_invertible() VERIFY(qr.isInvertible()); VERIFY(qr.isSurjective()); - m3 = MatrixType::Random(size,size); - m2 = qr.solve(m3); - VERIFY_IS_APPROX(m3, m1*m2); + check_solverbase<MatrixType, MatrixType>(m1, qr, size, size, size); // now construct a matrix with prescribed determinant m1.setZero(); @@ -116,6 +113,8 @@ template<typename MatrixType> void qr_verify_assert() FullPivHouseholderQR<MatrixType> qr; VERIFY_RAISES_ASSERT(qr.matrixQR()) VERIFY_RAISES_ASSERT(qr.solve(tmp)) + VERIFY_RAISES_ASSERT(qr.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(qr.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(qr.matrixQ()) VERIFY_RAISES_ASSERT(qr.dimensionOfKernel()) VERIFY_RAISES_ASSERT(qr.isInjective()) @@ -126,11 +125,12 @@ template<typename MatrixType> void qr_verify_assert() VERIFY_RAISES_ASSERT(qr.logAbsDeterminant()) } -void test_qr_fullpivoting() +EIGEN_DECLARE_TEST(qr_fullpivoting) { - for(int i = 0; i < 1; i++) { - // FIXME : very weird bug here -// CALL_SUBTEST(qr(Matrix2f()) ); + for(int i = 0; i < 1; i++) { + CALL_SUBTEST_5( qr<Matrix3f>() ); + CALL_SUBTEST_6( qr<Matrix3d>() ); + CALL_SUBTEST_8( qr<Matrix2f>() ); CALL_SUBTEST_1( qr<MatrixXf>() ); CALL_SUBTEST_2( qr<MatrixXd>() ); CALL_SUBTEST_3( qr<MatrixXcd>() ); diff --git a/test/qtvector.cpp b/test/qtvector.cpp index 2be885e48..4ec79b1e6 100644 
--- a/test/qtvector.cpp +++ b/test/qtvector.cpp @@ -18,8 +18,6 @@ template<typename MatrixType> void check_qtvector_matrix(const MatrixType& m) { - typedef typename MatrixType::Index Index; - Index rows = m.rows(); Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); @@ -127,7 +125,7 @@ void check_qtvector_quaternion(const QuaternionType&) } } -void test_qtvector() +EIGEN_DECLARE_TEST(qtvector) { // some non vectorizable fixed sizes CALL_SUBTEST(check_qtvector_matrix(Vector2f())); diff --git a/test/rand.cpp b/test/rand.cpp index 51cf01773..984c01f53 100644 --- a/test/rand.cpp +++ b/test/rand.cpp @@ -51,10 +51,10 @@ template<typename Scalar> void check_histogram(Scalar x, Scalar y, int bins) Scalar r = check_in_range(x,y); hist( int((int64(r)-int64(x))/divisor) )++; } - VERIFY( (((hist.cast<double>()/double(f))-1.0).abs()<0.02).all() ); + VERIFY( (((hist.cast<double>()/double(f))-1.0).abs()<0.03).all() ); } -void test_rand() +EIGEN_DECLARE_TEST(rand) { long long_ref = NumTraits<long>::highest()/10; signed char char_offset = (std::min)(g_repeat,64); diff --git a/test/random_without_cast_overflow.h b/test/random_without_cast_overflow.h new file mode 100644 index 000000000..000345110 --- /dev/null +++ b/test/random_without_cast_overflow.h @@ -0,0 +1,152 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020 C. Antonio Sanchez <cantonios@google.com> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// Utilities for generating random numbers without overflows, which might +// otherwise result in undefined behavior. + +namespace Eigen { +namespace internal { + +// Default implementation assuming SrcScalar fits into TgtScalar. 
+template <typename SrcScalar, typename TgtScalar, typename EnableIf = void> +struct random_without_cast_overflow { + static SrcScalar value() { return internal::random<SrcScalar>(); } +}; + +// Signed to unsigned integer widening cast. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<NumTraits<SrcScalar>::IsInteger && NumTraits<TgtScalar>::IsInteger && + !NumTraits<TgtScalar>::IsSigned && + (std::numeric_limits<SrcScalar>::digits < std::numeric_limits<TgtScalar>::digits || + (std::numeric_limits<SrcScalar>::digits == std::numeric_limits<TgtScalar>::digits && + NumTraits<SrcScalar>::IsSigned))>::type> { + static SrcScalar value() { + SrcScalar a = internal::random<SrcScalar>(); + return a < SrcScalar(0) ? -(a + 1) : a; + } +}; + +// Integer to unsigned narrowing cast. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if< + NumTraits<SrcScalar>::IsInteger && NumTraits<TgtScalar>::IsInteger && !NumTraits<SrcScalar>::IsSigned && + (std::numeric_limits<SrcScalar>::digits > std::numeric_limits<TgtScalar>::digits)>::type> { + static SrcScalar value() { + TgtScalar b = internal::random<TgtScalar>(); + return static_cast<SrcScalar>(b < TgtScalar(0) ? -(b + 1) : b); + } +}; + +// Integer to signed narrowing cast. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if< + NumTraits<SrcScalar>::IsInteger && NumTraits<TgtScalar>::IsInteger && NumTraits<SrcScalar>::IsSigned && + (std::numeric_limits<SrcScalar>::digits > std::numeric_limits<TgtScalar>::digits)>::type> { + static SrcScalar value() { return static_cast<SrcScalar>(internal::random<TgtScalar>()); } +}; + +// Unsigned to signed integer narrowing cast. 
+template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<NumTraits<SrcScalar>::IsInteger && NumTraits<TgtScalar>::IsInteger && + !NumTraits<SrcScalar>::IsSigned && NumTraits<TgtScalar>::IsSigned && + (std::numeric_limits<SrcScalar>::digits == + std::numeric_limits<TgtScalar>::digits)>::type> { + static SrcScalar value() { return internal::random<SrcScalar>() / 2; } +}; + +// Floating-point to integer, full precision. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if< + !NumTraits<SrcScalar>::IsInteger && !NumTraits<SrcScalar>::IsComplex && NumTraits<TgtScalar>::IsInteger && + (std::numeric_limits<TgtScalar>::digits <= std::numeric_limits<SrcScalar>::digits)>::type> { + static SrcScalar value() { return static_cast<SrcScalar>(internal::random<TgtScalar>()); } +}; + +// Floating-point to integer, narrowing precision. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if< + !NumTraits<SrcScalar>::IsInteger && !NumTraits<SrcScalar>::IsComplex && NumTraits<TgtScalar>::IsInteger && + (std::numeric_limits<TgtScalar>::digits > std::numeric_limits<SrcScalar>::digits)>::type> { + static SrcScalar value() { + // NOTE: internal::random<T>() is limited by RAND_MAX, so random<int64_t> is always within that range. + // This prevents us from simply shifting bits, which would result in only 0 or -1. + // Instead, keep least-significant K bits and sign. + static const TgtScalar KeepMask = (static_cast<TgtScalar>(1) << std::numeric_limits<SrcScalar>::digits) - 1; + const TgtScalar a = internal::random<TgtScalar>(); + return static_cast<SrcScalar>(a > TgtScalar(0) ? (a & KeepMask) : -(a & KeepMask)); + } +}; + +// Integer to floating-point, re-use above logic. 
+template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<NumTraits<SrcScalar>::IsInteger && !NumTraits<TgtScalar>::IsInteger && + !NumTraits<TgtScalar>::IsComplex>::type> { + static SrcScalar value() { + return static_cast<SrcScalar>(random_without_cast_overflow<TgtScalar, SrcScalar>::value()); + } +}; + +// Floating-point narrowing conversion. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<!NumTraits<SrcScalar>::IsInteger && !NumTraits<SrcScalar>::IsComplex && + !NumTraits<TgtScalar>::IsInteger && !NumTraits<TgtScalar>::IsComplex && + (std::numeric_limits<SrcScalar>::digits > + std::numeric_limits<TgtScalar>::digits)>::type> { + static SrcScalar value() { return static_cast<SrcScalar>(internal::random<TgtScalar>()); } +}; + +// Complex to non-complex. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<NumTraits<SrcScalar>::IsComplex && !NumTraits<TgtScalar>::IsComplex>::type> { + typedef typename NumTraits<SrcScalar>::Real SrcReal; + static SrcScalar value() { return SrcScalar(random_without_cast_overflow<SrcReal, TgtScalar>::value(), 0); } +}; + +// Non-complex to complex. +template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<!NumTraits<SrcScalar>::IsComplex && NumTraits<TgtScalar>::IsComplex>::type> { + typedef typename NumTraits<TgtScalar>::Real TgtReal; + static SrcScalar value() { return random_without_cast_overflow<SrcScalar, TgtReal>::value(); } +}; + +// Complex to complex. 
+template <typename SrcScalar, typename TgtScalar> +struct random_without_cast_overflow< + SrcScalar, TgtScalar, + typename internal::enable_if<NumTraits<SrcScalar>::IsComplex && NumTraits<TgtScalar>::IsComplex>::type> { + typedef typename NumTraits<SrcScalar>::Real SrcReal; + typedef typename NumTraits<TgtScalar>::Real TgtReal; + static SrcScalar value() { + return SrcScalar(random_without_cast_overflow<SrcReal, TgtReal>::value(), + random_without_cast_overflow<SrcReal, TgtReal>::value()); + } +}; + +} // namespace internal +} // namespace Eigen diff --git a/test/real_qz.cpp b/test/real_qz.cpp index 99ac31235..1cf7aba2d 100644 --- a/test/real_qz.cpp +++ b/test/real_qz.cpp @@ -18,7 +18,6 @@ template<typename MatrixType> void real_qz(const MatrixType& m) RealQZ.h */ using std::abs; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index dim = m.cols(); @@ -76,7 +75,7 @@ template<typename MatrixType> void real_qz(const MatrixType& m) VERIFY_IS_APPROX(qz.matrixZ()*qz.matrixZ().adjoint(), MatrixType::Identity(dim,dim)); } -void test_real_qz() +EIGEN_DECLARE_TEST(real_qz) { int s = 0; for(int i = 0; i < g_repeat; i++) { diff --git a/test/redux.cpp b/test/redux.cpp index 989e1057b..fdbab7714 100644 --- a/test/redux.cpp +++ b/test/redux.cpp @@ -9,12 +9,13 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define TEST_ENABLE_TEMPORARY_TRACKING +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +// ^^ see bug 1449 #include "main.h" template<typename MatrixType> void matrixRedux(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -27,6 +28,9 @@ template<typename MatrixType> void matrixRedux(const MatrixType& m) // failures if we underflow into denormals. Thus, we scale so that entries are close to 1. 
MatrixType m1_for_prod = MatrixType::Ones(rows, cols) + RealScalar(0.2) * m1; + Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> m2(rows,rows); + m2.setRandom(); + VERIFY_IS_MUCH_SMALLER_THAN(MatrixType::Zero(rows, cols).sum(), Scalar(1)); VERIFY_IS_APPROX(MatrixType::Ones(rows, cols).sum(), Scalar(float(rows*cols))); // the float() here to shut up excessive MSVC warning about int->complex conversion being lossy Scalar s(0), p(1), minc(numext::real(m1.coeff(0))), maxc(numext::real(m1.coeff(0))); @@ -45,6 +49,10 @@ template<typename MatrixType> void matrixRedux(const MatrixType& m) VERIFY_IS_APPROX(m1_for_prod.prod(), p); VERIFY_IS_APPROX(m1.real().minCoeff(), numext::real(minc)); VERIFY_IS_APPROX(m1.real().maxCoeff(), numext::real(maxc)); + + // test that partial reduction works if nested expressions is forced to evaluate early + VERIFY_IS_APPROX((m1.matrix() * m1.matrix().transpose()) .cwiseProduct(m2.matrix()).rowwise().sum().sum(), + (m1.matrix() * m1.matrix().transpose()).eval().cwiseProduct(m2.matrix()).rowwise().sum().sum()); // test slice vectorization assuming assign is ok Index r0 = internal::random<Index>(0,rows-1); @@ -71,15 +79,12 @@ template<typename MatrixType> void matrixRedux(const MatrixType& m) // test nesting complex expression VERIFY_EVALUATION_COUNT( (m1.matrix()*m1.matrix().transpose()).sum(), (MatrixType::IsVectorAtCompileTime && MatrixType::SizeAtCompileTime!=1 ? 0 : 1) ); - Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> m2(rows,rows); - m2.setRandom(); VERIFY_EVALUATION_COUNT( ((m1.matrix()*m1.matrix().transpose())+m2).sum(),(MatrixType::IsVectorAtCompileTime && MatrixType::SizeAtCompileTime!=1 ? 
0 : 1)); } template<typename VectorType> void vectorRedux(const VectorType& w) { using std::abs; - typedef typename VectorType::Index Index; typedef typename VectorType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; Index size = w.size(); @@ -146,7 +151,7 @@ template<typename VectorType> void vectorRedux(const VectorType& w) VERIFY_RAISES_ASSERT(v.head(0).maxCoeff()); } -void test_redux() +EIGEN_DECLARE_TEST(redux) { // the max size cannot be too large, otherwise reduxion operations obviously generate large errors. int maxsize = (std::min)(100,EIGEN_TEST_MAX_SIZE); diff --git a/test/ref.cpp b/test/ref.cpp index 769db0414..ebfc70d3d 100644 --- a/test/ref.cpp +++ b/test/ref.cpp @@ -13,7 +13,7 @@ #endif #define TEST_ENABLE_TEMPORARY_TRACKING - +#define TEST_CHECK_STATIC_ASSERTIONS #include "main.h" // test Ref.h @@ -32,7 +32,6 @@ template<typename MatrixType> void ref_matrix(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix<Scalar,Dynamic,Dynamic,MatrixType::Options> DynMatrixType; @@ -80,7 +79,6 @@ template<typename MatrixType> void ref_matrix(const MatrixType& m) template<typename VectorType> void ref_vector(const VectorType& m) { - typedef typename VectorType::Index Index; typedef typename VectorType::Scalar Scalar; typedef typename VectorType::RealScalar RealScalar; typedef Matrix<Scalar,Dynamic,1,VectorType::Options> DynMatrixType; @@ -104,10 +102,14 @@ template<typename VectorType> void ref_vector(const VectorType& m) Index i = internal::random<Index>(0,size-1); Index bsize = internal::random<Index>(1,size-i); - RefMat rm0 = v1; - VERIFY_IS_EQUAL(rm0, v1); - RefDynMat rv1 = v1; - VERIFY_IS_EQUAL(rv1, v1); + { RefMat rm0 = v1; VERIFY_IS_EQUAL(rm0, v1); } + { RefMat rm0 = v1.block(0,0,size,1); VERIFY_IS_EQUAL(rm0, v1); } + { RefDynMat rv1 = v1; VERIFY_IS_EQUAL(rv1, v1); } + { RefDynMat rv1 = v1.block(0,0,size,1); 
VERIFY_IS_EQUAL(rv1, v1); } + { VERIFY_RAISES_ASSERT( RefMat rm0 = v1.block(0, 0, size, 0); EIGEN_UNUSED_VARIABLE(rm0); ); } + if(VectorType::SizeAtCompileTime!=1) + { VERIFY_RAISES_ASSERT( RefDynMat rv1 = v1.block(0, 0, size, 0); EIGEN_UNUSED_VARIABLE(rv1); ); } + RefDynMat rv2 = v1.segment(i,bsize); VERIFY_IS_EQUAL(rv2, v1.segment(i,bsize)); rv2.setOnes(); @@ -139,6 +141,69 @@ template<typename VectorType> void ref_vector(const VectorType& m) VERIFY_IS_APPROX(mat1, mat2); } +template<typename Scalar, int Rows, int Cols> +void ref_vector_fixed_sizes() +{ + typedef Matrix<Scalar,Rows,Cols,RowMajor> RowMajorMatrixType; + typedef Matrix<Scalar,Rows,Cols,ColMajor> ColMajorMatrixType; + typedef Matrix<Scalar,1,Cols> RowVectorType; + typedef Matrix<Scalar,Rows,1> ColVectorType; + typedef Matrix<Scalar,Cols,1> RowVectorTransposeType; + typedef Matrix<Scalar,1,Rows> ColVectorTransposeType; + typedef Stride<Dynamic, Dynamic> DynamicStride; + + RowMajorMatrixType mr = RowMajorMatrixType::Random(); + ColMajorMatrixType mc = ColMajorMatrixType::Random(); + + Index i = internal::random<Index>(0,Rows-1); + Index j = internal::random<Index>(0,Cols-1); + + // Reference ith row. + Ref<RowVectorType, 0, DynamicStride> mr_ri = mr.row(i); + VERIFY_IS_EQUAL(mr_ri, mr.row(i)); + Ref<RowVectorType, 0, DynamicStride> mc_ri = mc.row(i); + VERIFY_IS_EQUAL(mc_ri, mc.row(i)); + + // Reference jth col. + Ref<ColVectorType, 0, DynamicStride> mr_cj = mr.col(j); + VERIFY_IS_EQUAL(mr_cj, mr.col(j)); + Ref<ColVectorType, 0, DynamicStride> mc_cj = mc.col(j); + VERIFY_IS_EQUAL(mc_cj, mc.col(j)); + + // Reference the transpose of row i. + Ref<RowVectorTransposeType, 0, DynamicStride> mr_rit = mr.row(i); + VERIFY_IS_EQUAL(mr_rit, mr.row(i).transpose()); + Ref<RowVectorTransposeType, 0, DynamicStride> mc_rit = mc.row(i); + VERIFY_IS_EQUAL(mc_rit, mc.row(i).transpose()); + + // Reference the transpose of col j. 
+ Ref<ColVectorTransposeType, 0, DynamicStride> mr_cjt = mr.col(j); + VERIFY_IS_EQUAL(mr_cjt, mr.col(j).transpose()); + Ref<ColVectorTransposeType, 0, DynamicStride> mc_cjt = mc.col(j); + VERIFY_IS_EQUAL(mc_cjt, mc.col(j).transpose()); + + // Const references without strides. + Ref<const RowVectorType> cmr_ri = mr.row(i); + VERIFY_IS_EQUAL(cmr_ri, mr.row(i)); + Ref<const RowVectorType> cmc_ri = mc.row(i); + VERIFY_IS_EQUAL(cmc_ri, mc.row(i)); + + Ref<const ColVectorType> cmr_cj = mr.col(j); + VERIFY_IS_EQUAL(cmr_cj, mr.col(j)); + Ref<const ColVectorType> cmc_cj = mc.col(j); + VERIFY_IS_EQUAL(cmc_cj, mc.col(j)); + + Ref<const RowVectorTransposeType> cmr_rit = mr.row(i); + VERIFY_IS_EQUAL(cmr_rit, mr.row(i).transpose()); + Ref<const RowVectorTransposeType> cmc_rit = mc.row(i); + VERIFY_IS_EQUAL(cmc_rit, mc.row(i).transpose()); + + Ref<const ColVectorTransposeType> cmr_cjt = mr.col(j); + VERIFY_IS_EQUAL(cmr_cjt, mr.col(j).transpose()); + Ref<const ColVectorTransposeType> cmc_cjt = mc.col(j); + VERIFY_IS_EQUAL(cmc_cjt, mc.col(j).transpose()); +} + template<typename PlainObjectType> void check_const_correctness(const PlainObjectType&) { // verify that ref-to-const don't have LvalueBit @@ -255,7 +320,18 @@ void test_ref_overloads() test_ref_ambiguous(A, B); } -void test_ref() +void test_ref_fixed_size_assert() +{ + Vector4f v4 = Vector4f::Random(); + VectorXf vx = VectorXf::Random(10); + VERIFY_RAISES_STATIC_ASSERT( Ref<Vector3f> y = v4; (void)y; ); + VERIFY_RAISES_STATIC_ASSERT( Ref<Vector3f> y = vx.head<4>(); (void)y; ); + VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = v4; (void)y; ); + VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = vx.head<4>(); (void)y; ); + VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = 2*v4; (void)y; ); +} + +EIGEN_DECLARE_TEST(ref) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( ref_vector(Matrix<float, 1, 1>()) ); @@ -274,7 +350,11 @@ void test_ref() CALL_SUBTEST_4( ref_matrix(Matrix<std::complex<double>,10,15>()) ); 
CALL_SUBTEST_5( ref_matrix(MatrixXi(internal::random<int>(1,10),internal::random<int>(1,10))) ); CALL_SUBTEST_6( call_ref() ); + + CALL_SUBTEST_8( (ref_vector_fixed_sizes<float,3,5>()) ); + CALL_SUBTEST_8( (ref_vector_fixed_sizes<float,15,10>()) ); } CALL_SUBTEST_7( test_ref_overloads() ); + CALL_SUBTEST_7( test_ref_fixed_size_assert() ); } diff --git a/test/reshape.cpp b/test/reshape.cpp new file mode 100644 index 000000000..7b16742a2 --- /dev/null +++ b/test/reshape.cpp @@ -0,0 +1,216 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr> +// Copyright (C) 2014 yoco <peter.xiau@gmail.com> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +template<typename T1,typename T2> +typename internal::enable_if<internal::is_same<T1,T2>::value,bool>::type +is_same_eq(const T1& a, const T2& b) +{ + return (a.array() == b.array()).all(); +} + +template <int Order,typename MatType> +void check_auto_reshape4x4(MatType m) +{ + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 1> v1( 1); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 2> v2( 2); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 4> v4( 4); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 8> v8( 8); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1:16> v16(16); + + VERIFY(is_same_eq(m.template reshaped<Order>( 1, AutoSize), m.template reshaped<Order>( 1, 16))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, 16 ), m.template reshaped<Order>( 1, 16))); + VERIFY(is_same_eq(m.template reshaped<Order>( 2, AutoSize), m.template reshaped<Order>( 2, 8))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, 8 ), 
m.template reshaped<Order>( 2, 8))); + VERIFY(is_same_eq(m.template reshaped<Order>( 4, AutoSize), m.template reshaped<Order>( 4, 4))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, 4 ), m.template reshaped<Order>( 4, 4))); + VERIFY(is_same_eq(m.template reshaped<Order>( 8, AutoSize), m.template reshaped<Order>( 8, 2))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, 2 ), m.template reshaped<Order>( 8, 2))); + VERIFY(is_same_eq(m.template reshaped<Order>(16, AutoSize), m.template reshaped<Order>(16, 1))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, 1 ), m.template reshaped<Order>(16, 1))); + + VERIFY(is_same_eq(m.template reshaped<Order>(fix< 1>, AutoSize), m.template reshaped<Order>(fix< 1>, v16 ))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, fix<16> ), m.template reshaped<Order>( v1, fix<16>))); + VERIFY(is_same_eq(m.template reshaped<Order>(fix< 2>, AutoSize), m.template reshaped<Order>(fix< 2>, v8 ))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, fix< 8> ), m.template reshaped<Order>( v2, fix< 8>))); + VERIFY(is_same_eq(m.template reshaped<Order>(fix< 4>, AutoSize), m.template reshaped<Order>(fix< 4>, v4 ))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, fix< 4> ), m.template reshaped<Order>( v4, fix< 4>))); + VERIFY(is_same_eq(m.template reshaped<Order>(fix< 8>, AutoSize), m.template reshaped<Order>(fix< 8>, v2 ))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, fix< 2> ), m.template reshaped<Order>( v8, fix< 2>))); + VERIFY(is_same_eq(m.template reshaped<Order>(fix<16>, AutoSize), m.template reshaped<Order>(fix<16>, v1 ))); + VERIFY(is_same_eq(m.template reshaped<Order>(AutoSize, fix< 1> ), m.template reshaped<Order>(v16, fix< 1>))); +} + +template <typename MatType> +void check_direct_access_reshape4x4(MatType , internal::FixedInt<RowMajorBit>) {} + +template <typename MatType> +void check_direct_access_reshape4x4(MatType m, internal::FixedInt<0>) { + VERIFY_IS_EQUAL(m.reshaped( 
1, 16).data(), m.data()); + VERIFY_IS_EQUAL(m.reshaped( 1, 16).innerStride(), 1); + + VERIFY_IS_EQUAL(m.reshaped( 2, 8).data(), m.data()); + VERIFY_IS_EQUAL(m.reshaped( 2, 8).innerStride(), 1); + VERIFY_IS_EQUAL(m.reshaped( 2, 8).outerStride(), 2); +} + +// just test a 4x4 matrix, enumerate all combination manually +template <typename MatType> +void reshape4x4(MatType m) +{ + typedef typename MatType::Scalar Scalar; + + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 1> v1( 1); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 2> v2( 2); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 4> v4( 4); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1: 8> v8( 8); + internal::VariableAndFixedInt<MatType::SizeAtCompileTime==Dynamic?-1:16> v16(16); + + if((MatType::Flags&RowMajorBit)==0) + { + typedef Map<MatrixXi> MapMat; + // dynamic + VERIFY_IS_EQUAL((m.reshaped( 1, 16)), MapMat(m.data(), 1, 16)); + VERIFY_IS_EQUAL((m.reshaped( 2, 8)), MapMat(m.data(), 2, 8)); + VERIFY_IS_EQUAL((m.reshaped( 4, 4)), MapMat(m.data(), 4, 4)); + VERIFY_IS_EQUAL((m.reshaped( 8, 2)), MapMat(m.data(), 8, 2)); + VERIFY_IS_EQUAL((m.reshaped(16, 1)), MapMat(m.data(), 16, 1)); + + // static + VERIFY_IS_EQUAL(m.reshaped(fix< 1>, fix<16>), MapMat(m.data(), 1, 16)); + VERIFY_IS_EQUAL(m.reshaped(fix< 2>, fix< 8>), MapMat(m.data(), 2, 8)); + VERIFY_IS_EQUAL(m.reshaped(fix< 4>, fix< 4>), MapMat(m.data(), 4, 4)); + VERIFY_IS_EQUAL(m.reshaped(fix< 8>, fix< 2>), MapMat(m.data(), 8, 2)); + VERIFY_IS_EQUAL(m.reshaped(fix<16>, fix< 1>), MapMat(m.data(), 16, 1)); + + + // reshape chain + VERIFY_IS_EQUAL( + (m + .reshaped( 1, 16) + .reshaped(fix< 2>,fix< 8>) + .reshaped(16, 1) + .reshaped(fix< 8>,fix< 2>) + .reshaped( 2, 8) + .reshaped(fix< 1>,fix<16>) + .reshaped( 4, 4) + .reshaped(fix<16>,fix< 1>) + .reshaped( 8, 2) + .reshaped(fix< 4>,fix< 4>) + ), + MapMat(m.data(), 4, 4) + ); + } + + VERIFY(is_same_eq(m.reshaped( 1, 
AutoSize), m.reshaped( 1, 16))); + VERIFY(is_same_eq(m.reshaped(AutoSize, 16), m.reshaped( 1, 16))); + VERIFY(is_same_eq(m.reshaped( 2, AutoSize), m.reshaped( 2, 8))); + VERIFY(is_same_eq(m.reshaped(AutoSize, 8), m.reshaped( 2, 8))); + VERIFY(is_same_eq(m.reshaped( 4, AutoSize), m.reshaped( 4, 4))); + VERIFY(is_same_eq(m.reshaped(AutoSize, 4), m.reshaped( 4, 4))); + VERIFY(is_same_eq(m.reshaped( 8, AutoSize), m.reshaped( 8, 2))); + VERIFY(is_same_eq(m.reshaped(AutoSize, 2), m.reshaped( 8, 2))); + VERIFY(is_same_eq(m.reshaped(16, AutoSize), m.reshaped(16, 1))); + VERIFY(is_same_eq(m.reshaped(AutoSize, 1), m.reshaped(16, 1))); + + VERIFY(is_same_eq(m.reshaped(fix< 1>, AutoSize), m.reshaped(fix< 1>, v16))); + VERIFY(is_same_eq(m.reshaped(AutoSize, fix<16>), m.reshaped( v1, fix<16>))); + VERIFY(is_same_eq(m.reshaped(fix< 2>, AutoSize), m.reshaped(fix< 2>, v8))); + VERIFY(is_same_eq(m.reshaped(AutoSize, fix< 8>), m.reshaped( v2, fix< 8>))); + VERIFY(is_same_eq(m.reshaped(fix< 4>, AutoSize), m.reshaped(fix< 4>, v4))); + VERIFY(is_same_eq(m.reshaped(AutoSize, fix< 4>), m.reshaped( v4, fix< 4>))); + VERIFY(is_same_eq(m.reshaped(fix< 8>, AutoSize), m.reshaped(fix< 8>, v2))); + VERIFY(is_same_eq(m.reshaped(AutoSize, fix< 2>), m.reshaped( v8, fix< 2>))); + VERIFY(is_same_eq(m.reshaped(fix<16>, AutoSize), m.reshaped(fix<16>, v1))); + VERIFY(is_same_eq(m.reshaped(AutoSize, fix< 1>), m.reshaped(v16, fix< 1>))); + + check_auto_reshape4x4<ColMajor> (m); + check_auto_reshape4x4<RowMajor> (m); + check_auto_reshape4x4<AutoOrder>(m); + check_auto_reshape4x4<ColMajor> (m.transpose()); + check_auto_reshape4x4<ColMajor> (m.transpose()); + check_auto_reshape4x4<AutoOrder>(m.transpose()); + + check_direct_access_reshape4x4(m,fix<MatType::Flags&RowMajorBit>); + + if((MatType::Flags&RowMajorBit)==0) + { + VERIFY_IS_EQUAL(m.template reshaped<ColMajor>(2,8),m.reshaped(2,8)); + VERIFY_IS_EQUAL(m.template reshaped<ColMajor>(2,8),m.template reshaped<AutoOrder>(2,8)); + 
VERIFY_IS_EQUAL(m.transpose().template reshaped<RowMajor>(2,8),m.transpose().template reshaped<AutoOrder>(2,8)); + } + else + { + VERIFY_IS_EQUAL(m.template reshaped<ColMajor>(2,8),m.reshaped(2,8)); + VERIFY_IS_EQUAL(m.template reshaped<RowMajor>(2,8),m.template reshaped<AutoOrder>(2,8)); + VERIFY_IS_EQUAL(m.transpose().template reshaped<ColMajor>(2,8),m.transpose().template reshaped<AutoOrder>(2,8)); + VERIFY_IS_EQUAL(m.transpose().reshaped(2,8),m.transpose().template reshaped<AutoOrder>(2,8)); + } + + MatrixXi m28r1 = m.template reshaped<RowMajor>(2,8); + MatrixXi m28r2 = m.transpose().template reshaped<ColMajor>(8,2).transpose(); + VERIFY_IS_EQUAL( m28r1, m28r2); + + VERIFY(is_same_eq(m.reshaped(v16,fix<1>), m.reshaped())); + VERIFY_IS_EQUAL(m.reshaped(16,1).eval(), m.reshaped().eval()); + VERIFY_IS_EQUAL(m.reshaped(1,16).eval(), m.reshaped().transpose().eval()); + VERIFY_IS_EQUAL(m.reshaped().reshaped(2,8), m.reshaped(2,8)); + VERIFY_IS_EQUAL(m.reshaped().reshaped(4,4), m.reshaped(4,4)); + VERIFY_IS_EQUAL(m.reshaped().reshaped(8,2), m.reshaped(8,2)); + + VERIFY_IS_EQUAL(m.reshaped(), m.template reshaped<ColMajor>()); + VERIFY_IS_EQUAL(m.transpose().reshaped(), m.template reshaped<RowMajor>()); + VERIFY_IS_EQUAL(m.template reshaped<RowMajor>(AutoSize,fix<1>), m.template reshaped<RowMajor>()); + VERIFY_IS_EQUAL(m.template reshaped<AutoOrder>(AutoSize,fix<1>), m.template reshaped<AutoOrder>()); + + VERIFY(is_same_eq(m.reshaped(AutoSize,fix<1>), m.reshaped())); + VERIFY_IS_EQUAL(m.template reshaped<RowMajor>(fix<1>,AutoSize), m.transpose().reshaped().transpose()); + + // check assignment + { + Matrix<Scalar,Dynamic,1> m1x(m.size()); m1x.setRandom(); + VERIFY_IS_APPROX(m.reshaped() = m1x, m1x); + VERIFY_IS_APPROX(m, m1x.reshaped(4,4)); + + Matrix<Scalar,Dynamic,Dynamic> m28(2,8); m28.setRandom(); + VERIFY_IS_APPROX(m.reshaped(2,8) = m28, m28); + VERIFY_IS_APPROX(m, m28.reshaped(4,4)); + VERIFY_IS_APPROX(m.template reshaped<RowMajor>(2,8) = m28, m28); + + 
Matrix<Scalar,Dynamic,Dynamic> m24(2,4); m24.setRandom(); + VERIFY_IS_APPROX(m(seq(0,last,2),all).reshaped(2,4) = m24, m24); + + // check constness: + m.reshaped(2,8).nestedExpression() = m; + } +} + +EIGEN_DECLARE_TEST(reshape) +{ + typedef Matrix<int,Dynamic,Dynamic,RowMajor> RowMatrixXi; + typedef Matrix<int,4,4,RowMajor> RowMatrix4i; + MatrixXi mx = MatrixXi::Random(4, 4); + Matrix4i m4 = Matrix4i::Random(4, 4); + RowMatrixXi rmx = RowMatrixXi::Random(4, 4); + RowMatrix4i rm4 = RowMatrix4i::Random(4, 4); + + // test dynamic-size matrix + CALL_SUBTEST(reshape4x4(mx)); + // test static-size matrix + CALL_SUBTEST(reshape4x4(m4)); + // test dynamic-size const matrix + CALL_SUBTEST(reshape4x4(static_cast<const MatrixXi>(mx))); + // test static-size const matrix + CALL_SUBTEST(reshape4x4(static_cast<const Matrix4i>(m4))); + + CALL_SUBTEST(reshape4x4(rmx)); + CALL_SUBTEST(reshape4x4(rm4)); +} diff --git a/test/resize.cpp b/test/resize.cpp index 4adaafe56..646a75b8f 100644 --- a/test/resize.cpp +++ b/test/resize.cpp @@ -33,7 +33,7 @@ void resizeLikeTest12() { resizeLikeTest<1,2>(); } void resizeLikeTest1020() { resizeLikeTest<10,20>(); } void resizeLikeTest31() { resizeLikeTest<3,1>(); } -void test_resize() +EIGEN_DECLARE_TEST(resize) { CALL_SUBTEST(resizeLikeTest12() ); CALL_SUBTEST(resizeLikeTest1020() ); diff --git a/test/rvalue_types.cpp b/test/rvalue_types.cpp index 8887f1b1b..2c9999ce8 100644 --- a/test/rvalue_types.cpp +++ b/test/rvalue_types.cpp @@ -7,7 +7,13 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+#define EIGEN_RUNTIME_NO_MALLOC + #include "main.h" +#if EIGEN_HAS_CXX11 +#include "MovableScalar.h" +#endif +#include "SafeScalar.h" #include <Eigen/Core> @@ -24,41 +30,128 @@ void rvalue_copyassign(const MatrixType& m) MatrixType tmp = m; UIntPtr src_address = reinterpret_cast<UIntPtr>(tmp.data()); + Eigen::internal::set_is_malloc_allowed(false); // moving from an rvalue reference shall never allocate // move the temporary to n MatrixType n = std::move(tmp); UIntPtr dst_address = reinterpret_cast<UIntPtr>(n.data()); - if (MatrixType::RowsAtCompileTime==Dynamic|| MatrixType::ColsAtCompileTime==Dynamic) { // verify that we actually moved the guts VERIFY_IS_EQUAL(src_address, dst_address); + VERIFY_IS_EQUAL(tmp.size(), 0); + VERIFY_IS_EQUAL(reinterpret_cast<UIntPtr>(tmp.data()), UIntPtr(0)); } // verify that the content did not change Scalar abs_diff = (m-n).array().abs().sum(); VERIFY_IS_EQUAL(abs_diff, Scalar(0)); + Eigen::internal::set_is_malloc_allowed(true); +} +template<typename TranspositionsType> +void rvalue_transpositions(Index rows) +{ + typedef typename TranspositionsType::IndicesType PermutationVectorType; + + PermutationVectorType vec; + randomPermutationVector(vec, rows); + TranspositionsType t0(vec); + + Eigen::internal::set_is_malloc_allowed(false); // moving from an rvalue reference shall never allocate + + UIntPtr t0_address = reinterpret_cast<UIntPtr>(t0.indices().data()); + + // Move constructors: + TranspositionsType t1 = std::move(t0); + UIntPtr t1_address = reinterpret_cast<UIntPtr>(t1.indices().data()); + VERIFY_IS_EQUAL(t0_address, t1_address); + // t0 must be de-allocated: + VERIFY_IS_EQUAL(t0.size(), 0); + VERIFY_IS_EQUAL(reinterpret_cast<UIntPtr>(t0.indices().data()), UIntPtr(0)); + + + // Move assignment: + t0 = std::move(t1); + t0_address = reinterpret_cast<UIntPtr>(t0.indices().data()); + VERIFY_IS_EQUAL(t0_address, t1_address); + // t1 must be de-allocated: + VERIFY_IS_EQUAL(t1.size(), 0); + 
VERIFY_IS_EQUAL(reinterpret_cast<UIntPtr>(t1.indices().data()), UIntPtr(0)); + + Eigen::internal::set_is_malloc_allowed(true); +} + +template <typename MatrixType> +void rvalue_move(const MatrixType& m) +{ + // lvalue reference is copied + MatrixType b(m); + VERIFY_IS_EQUAL(b, m); + + // lvalue reference is copied + MatrixType c{m}; + VERIFY_IS_EQUAL(c, m); + + // lvalue reference is copied + MatrixType d = m; + VERIFY_IS_EQUAL(d, m); + + // rvalue reference is moved - copy constructor. + MatrixType e_src(m); + VERIFY_IS_EQUAL(e_src, m); + MatrixType e_dst(std::move(e_src)); + VERIFY_IS_EQUAL(e_dst, m); + + // rvalue reference is moved - copy constructor. + MatrixType f_src(m); + VERIFY_IS_EQUAL(f_src, m); + MatrixType f_dst = std::move(f_src); + VERIFY_IS_EQUAL(f_dst, m); + + // rvalue reference is moved - copy assignment. + MatrixType g_src(m); + VERIFY_IS_EQUAL(g_src, m); + MatrixType g_dst; + g_dst = std::move(g_src); + VERIFY_IS_EQUAL(g_dst, m); } #else template <typename MatrixType> void rvalue_copyassign(const MatrixType&) {} +template<typename TranspositionsType> +void rvalue_transpositions(Index) {} +template <typename MatrixType> +void rvalue_move(const MatrixType&) {} #endif -void test_rvalue_types() +EIGEN_DECLARE_TEST(rvalue_types) { - CALL_SUBTEST_1(rvalue_copyassign( MatrixXf::Random(50,50).eval() )); - CALL_SUBTEST_1(rvalue_copyassign( ArrayXXf::Random(50,50).eval() )); + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(rvalue_copyassign( MatrixXf::Random(50,50).eval() )); + CALL_SUBTEST_1(rvalue_copyassign( ArrayXXf::Random(50,50).eval() )); + + CALL_SUBTEST_1(rvalue_copyassign( Matrix<float,1,Dynamic>::Random(50).eval() )); + CALL_SUBTEST_1(rvalue_copyassign( Array<float,1,Dynamic>::Random(50).eval() )); - CALL_SUBTEST_1(rvalue_copyassign( Matrix<float,1,Dynamic>::Random(50).eval() )); - CALL_SUBTEST_1(rvalue_copyassign( Array<float,1,Dynamic>::Random(50).eval() )); + CALL_SUBTEST_1(rvalue_copyassign( Matrix<float,Dynamic,1>::Random(50).eval() 
)); + CALL_SUBTEST_1(rvalue_copyassign( Array<float,Dynamic,1>::Random(50).eval() )); - CALL_SUBTEST_1(rvalue_copyassign( Matrix<float,Dynamic,1>::Random(50).eval() )); - CALL_SUBTEST_1(rvalue_copyassign( Array<float,Dynamic,1>::Random(50).eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array<float,2,1>::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array<float,3,1>::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array<float,4,1>::Random().eval() )); + + CALL_SUBTEST_2(rvalue_copyassign( Array<float,2,2>::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array<float,3,3>::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array<float,4,4>::Random().eval() )); - CALL_SUBTEST_2(rvalue_copyassign( Array<float,2,1>::Random().eval() )); - CALL_SUBTEST_2(rvalue_copyassign( Array<float,3,1>::Random().eval() )); - CALL_SUBTEST_2(rvalue_copyassign( Array<float,4,1>::Random().eval() )); + CALL_SUBTEST_3((rvalue_transpositions<PermutationMatrix<Dynamic, Dynamic, int> >(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_3((rvalue_transpositions<PermutationMatrix<Dynamic, Dynamic, Index> >(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_4((rvalue_transpositions<Transpositions<Dynamic, Dynamic, int> >(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_4((rvalue_transpositions<Transpositions<Dynamic, Dynamic, Index> >(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)))); - CALL_SUBTEST_2(rvalue_copyassign( Array<float,2,2>::Random().eval() )); - CALL_SUBTEST_2(rvalue_copyassign( Array<float,3,3>::Random().eval() )); - CALL_SUBTEST_2(rvalue_copyassign( Array<float,4,4>::Random().eval() )); +#if EIGEN_HAS_CXX11 + CALL_SUBTEST_5(rvalue_move(Eigen::Matrix<MovableScalar<float>,1,3>::Random().eval())); + CALL_SUBTEST_5(rvalue_move(Eigen::Matrix<SafeScalar<float>,1,3>::Random().eval())); + CALL_SUBTEST_5(rvalue_move(Eigen::Matrix<SafeScalar<float>,Eigen::Dynamic,Eigen::Dynamic>::Random(1,3).eval())); +#endif + } } diff 
--git a/test/schur_complex.cpp b/test/schur_complex.cpp index deb78e44e..03e17e81d 100644 --- a/test/schur_complex.cpp +++ b/test/schur_complex.cpp @@ -79,7 +79,7 @@ template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTim } } -void test_schur_complex() +EIGEN_DECLARE_TEST(schur_complex) { CALL_SUBTEST_1(( schur<Matrix4cd>() )); CALL_SUBTEST_2(( schur<MatrixXcf>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) )); diff --git a/test/schur_real.cpp b/test/schur_real.cpp index 4aede87df..945461027 100644 --- a/test/schur_real.cpp +++ b/test/schur_real.cpp @@ -13,8 +13,6 @@ template<typename MatrixType> void verifyIsQuasiTriangular(const MatrixType& T) { - typedef typename MatrixType::Index Index; - const Index size = T.cols(); typedef typename MatrixType::Scalar Scalar; @@ -100,7 +98,7 @@ template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTim } } -void test_schur_real() +EIGEN_DECLARE_TEST(schur_real) { CALL_SUBTEST_1(( schur<Matrix4f>() )); CALL_SUBTEST_2(( schur<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) )); diff --git a/test/selfadjoint.cpp b/test/selfadjoint.cpp index 92401e506..9ca9cef9e 100644 --- a/test/selfadjoint.cpp +++ b/test/selfadjoint.cpp @@ -7,6 +7,7 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+#define TEST_CHECK_STATIC_ASSERTIONS #include "main.h" // This file tests the basic selfadjointView API, @@ -14,7 +15,6 @@ template<typename MatrixType> void selfadjoint(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); @@ -45,6 +45,9 @@ template<typename MatrixType> void selfadjoint(const MatrixType& m) m4 = m2; m4 -= m1.template selfadjointView<Lower>(); VERIFY_IS_APPROX(m4, m2-m3); + + VERIFY_RAISES_STATIC_ASSERT(m2.template selfadjointView<StrictlyUpper>()); + VERIFY_RAISES_STATIC_ASSERT(m2.template selfadjointView<UnitLower>()); } void bug_159() @@ -53,7 +56,7 @@ void bug_159() EIGEN_UNUSED_VARIABLE(m) } -void test_selfadjoint() +EIGEN_DECLARE_TEST(selfadjoint) { for(int i = 0; i < g_repeat ; i++) { diff --git a/test/simplicial_cholesky.cpp b/test/simplicial_cholesky.cpp index 649c817b4..538d01ab5 100644 --- a/test/simplicial_cholesky.cpp +++ b/test/simplicial_cholesky.cpp @@ -9,17 +9,17 @@ #include "sparse_solver.h" -template<typename T, typename I> void test_simplicial_cholesky_T() +template<typename T, typename I_, int flag> void test_simplicial_cholesky_T() { - typedef SparseMatrix<T,0,I> SparseMatrixType; + typedef SparseMatrix<T,flag,I_> SparseMatrixType; SimplicialCholesky<SparseMatrixType, Lower> chol_colmajor_lower_amd; SimplicialCholesky<SparseMatrixType, Upper> chol_colmajor_upper_amd; SimplicialLLT< SparseMatrixType, Lower> llt_colmajor_lower_amd; SimplicialLLT< SparseMatrixType, Upper> llt_colmajor_upper_amd; SimplicialLDLT< SparseMatrixType, Lower> ldlt_colmajor_lower_amd; SimplicialLDLT< SparseMatrixType, Upper> ldlt_colmajor_upper_amd; - SimplicialLDLT< SparseMatrixType, Lower, NaturalOrdering<I> > ldlt_colmajor_lower_nat; - SimplicialLDLT< SparseMatrixType, Upper, NaturalOrdering<I> > ldlt_colmajor_upper_nat; + SimplicialLDLT< SparseMatrixType, Lower, NaturalOrdering<I_> > ldlt_colmajor_lower_nat; + SimplicialLDLT< SparseMatrixType, Upper, NaturalOrdering<I_> 
> ldlt_colmajor_upper_nat; check_sparse_spd_solving(chol_colmajor_lower_amd); check_sparse_spd_solving(chol_colmajor_upper_amd); @@ -35,13 +35,16 @@ template<typename T, typename I> void test_simplicial_cholesky_T() check_sparse_spd_determinant(ldlt_colmajor_lower_amd); check_sparse_spd_determinant(ldlt_colmajor_upper_amd); - check_sparse_spd_solving(ldlt_colmajor_lower_nat, 300, 1000); - check_sparse_spd_solving(ldlt_colmajor_upper_nat, 300, 1000); + check_sparse_spd_solving(ldlt_colmajor_lower_nat, (std::min)(300,EIGEN_TEST_MAX_SIZE), 1000); + check_sparse_spd_solving(ldlt_colmajor_upper_nat, (std::min)(300,EIGEN_TEST_MAX_SIZE), 1000); } -void test_simplicial_cholesky() +EIGEN_DECLARE_TEST(simplicial_cholesky) { - CALL_SUBTEST_1(( test_simplicial_cholesky_T<double,int>() )); - CALL_SUBTEST_2(( test_simplicial_cholesky_T<std::complex<double>, int>() )); - CALL_SUBTEST_3(( test_simplicial_cholesky_T<double,long int>() )); + CALL_SUBTEST_11(( test_simplicial_cholesky_T<double, int, ColMajor>() )); + CALL_SUBTEST_12(( test_simplicial_cholesky_T<std::complex<double>, int, ColMajor>() )); + CALL_SUBTEST_13(( test_simplicial_cholesky_T<double, long int, ColMajor>() )); + CALL_SUBTEST_21(( test_simplicial_cholesky_T<double, int, RowMajor>() )); + CALL_SUBTEST_22(( test_simplicial_cholesky_T<std::complex<double>, int, RowMajor>() )); + CALL_SUBTEST_23(( test_simplicial_cholesky_T<double, long int, RowMajor>() )); } diff --git a/test/sizeof.cpp b/test/sizeof.cpp index 03ad20453..af34e97dd 100644 --- a/test/sizeof.cpp +++ b/test/sizeof.cpp @@ -15,10 +15,10 @@ template<typename MatrixType> void verifySizeOf(const MatrixType&) if (MatrixType::RowsAtCompileTime!=Dynamic && MatrixType::ColsAtCompileTime!=Dynamic) VERIFY_IS_EQUAL(std::ptrdiff_t(sizeof(MatrixType)),std::ptrdiff_t(sizeof(Scalar))*std::ptrdiff_t(MatrixType::SizeAtCompileTime)); else - VERIFY_IS_EQUAL(sizeof(MatrixType),sizeof(Scalar*) + 2 * sizeof(typename MatrixType::Index)); + 
VERIFY_IS_EQUAL(sizeof(MatrixType),sizeof(Scalar*) + 2 * sizeof(Index)); } -void test_sizeof() +EIGEN_DECLARE_TEST(sizeof) { CALL_SUBTEST(verifySizeOf(Matrix<float, 1, 1>()) ); CALL_SUBTEST(verifySizeOf(Array<float, 2, 1>()) ); diff --git a/test/sizeoverflow.cpp b/test/sizeoverflow.cpp index 240d22294..421351233 100644 --- a/test/sizeoverflow.cpp +++ b/test/sizeoverflow.cpp @@ -34,7 +34,7 @@ void triggerVectorBadAlloc(Index size) VERIFY_THROWS_BADALLOC( VectorType v; v.conservativeResize(size) ); } -void test_sizeoverflow() +EIGEN_DECLARE_TEST(sizeoverflow) { // there are 2 levels of overflow checking. first in PlainObjectBase.h we check for overflow in rows*cols computations. // this is tested in tests of the form times_itself_gives_0 * times_itself_gives_0 diff --git a/test/smallvectors.cpp b/test/smallvectors.cpp index 781511397..f9803acbb 100644 --- a/test/smallvectors.cpp +++ b/test/smallvectors.cpp @@ -57,7 +57,7 @@ template<typename Scalar> void smallVectors() } } -void test_smallvectors() +EIGEN_DECLARE_TEST(smallvectors) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST(smallVectors<int>() ); diff --git a/test/solverbase.h b/test/solverbase.h new file mode 100644 index 000000000..13c09593a --- /dev/null +++ b/test/solverbase.h @@ -0,0 +1,36 @@ +#ifndef TEST_SOLVERBASE_H +#define TEST_SOLVERBASE_H + +template<typename DstType, typename RhsType, typename MatrixType, typename SolverType> +void check_solverbase(const MatrixType& matrix, const SolverType& solver, Index rows, Index cols, Index cols2) +{ + // solve + DstType m2 = DstType::Random(cols,cols2); + RhsType m3 = matrix*m2; + DstType solver_solution = DstType::Random(cols,cols2); + solver._solve_impl(m3, solver_solution); + VERIFY_IS_APPROX(m3, matrix*solver_solution); + solver_solution = DstType::Random(cols,cols2); + solver_solution = solver.solve(m3); + VERIFY_IS_APPROX(m3, matrix*solver_solution); + // test solve with transposed + m3 = RhsType::Random(rows,cols2); + m2 = matrix.transpose()*m3; + 
RhsType solver_solution2 = RhsType::Random(rows,cols2); + solver.template _solve_impl_transposed<false>(m2, solver_solution2); + VERIFY_IS_APPROX(m2, matrix.transpose()*solver_solution2); + solver_solution2 = RhsType::Random(rows,cols2); + solver_solution2 = solver.transpose().solve(m2); + VERIFY_IS_APPROX(m2, matrix.transpose()*solver_solution2); + // test solve with conjugate transposed + m3 = RhsType::Random(rows,cols2); + m2 = matrix.adjoint()*m3; + solver_solution2 = RhsType::Random(rows,cols2); + solver.template _solve_impl_transposed<true>(m2, solver_solution2); + VERIFY_IS_APPROX(m2, matrix.adjoint()*solver_solution2); + solver_solution2 = RhsType::Random(rows,cols2); + solver_solution2 = solver.adjoint().solve(m2); + VERIFY_IS_APPROX(m2, matrix.adjoint()*solver_solution2); +} + +#endif // TEST_SOLVERBASE_H diff --git a/test/sparse.h b/test/sparse.h index 9912e1e24..6cd07fc0a 100644 --- a/test/sparse.h +++ b/test/sparse.h @@ -14,7 +14,7 @@ #include "main.h" -#if EIGEN_GNUC_AT_LEAST(4,0) && !defined __ICC && !defined(__clang__) +#if EIGEN_HAS_CXX11 #ifdef min #undef min @@ -24,15 +24,9 @@ #undef max #endif -#include <tr1/unordered_map> +#include <unordered_map> #define EIGEN_UNORDERED_MAP_SUPPORT -namespace std { - using std::tr1::unordered_map; -} -#endif -#ifdef EIGEN_GOOGLEHASH_SUPPORT - #include <google/sparse_hash_map> #endif #include <Eigen/Cholesky> diff --git a/test/sparseLM.cpp b/test/sparseLM.cpp index 8e148f9bc..a48fcb685 100644 --- a/test/sparseLM.cpp +++ b/test/sparseLM.cpp @@ -168,7 +168,7 @@ void test_sparseLM_T() return ; } -void test_sparseLM() +EIGEN_DECLARE_TEST(sparseLM) { CALL_SUBTEST_1(test_sparseLM_T<double>()); diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp index 384985028..9453111b7 100644 --- a/test/sparse_basic.cpp +++ b/test/sparse_basic.cpp @@ -9,9 +9,16 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+#ifndef EIGEN_SPARSE_TEST_INCLUDED_FROM_SPARSE_EXTRA static long g_realloc_count = 0; #define EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN g_realloc_count++; +static long g_dense_op_sparse_count = 0; +#define EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN g_dense_op_sparse_count++; +#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN g_dense_op_sparse_count+=10; +#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN g_dense_op_sparse_count+=20; +#endif + #include "sparse.h" template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& ref) @@ -194,6 +201,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(refM4.cwiseProduct(m3), refM4.cwiseProduct(refM3)); // VERIFY_IS_APPROX(m3.cwise()/refM4, refM3.cwise()/refM4); + // mixed sparse-dense VERIFY_IS_APPROX(refM4 + m3, refM4 + refM3); VERIFY_IS_APPROX(m3 + refM4, refM3 + refM4); VERIFY_IS_APPROX(refM4 - m3, refM4 - refM3); @@ -222,14 +230,34 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(m1+=m2, refM1+=refM2); VERIFY_IS_APPROX(m1-=m2, refM1-=refM2); + refM3 = refM1; + + VERIFY_IS_APPROX(refM1+=m2, refM3+=refM2); + VERIFY_IS_APPROX(refM1-=m2, refM3-=refM2); + + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1 =m2+refM4, refM3 =refM2+refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,10); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1+=m2+refM4, refM3+=refM2+refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1-=m2+refM4, refM3-=refM2+refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1 =refM4+m2, refM3 =refM2+refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1+=refM4+m2, refM3+=refM2+refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1-=refM4+m2, refM3-=refM2+refM4); 
VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1 =m2-refM4, refM3 =refM2-refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,20); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1+=m2-refM4, refM3+=refM2-refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1-=m2-refM4, refM3-=refM2-refM4); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1 =refM4-m2, refM3 =refM4-refM2); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1+=refM4-m2, refM3+=refM4-refM2); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + g_dense_op_sparse_count=0; VERIFY_IS_APPROX(refM1-=refM4-m2, refM3-=refM4-refM2); VERIFY_IS_EQUAL(g_dense_op_sparse_count,1); + refM3 = m3; + if (rows>=2 && cols>=2) { VERIFY_RAISES_ASSERT( m1 += m1.innerVector(0) ); VERIFY_RAISES_ASSERT( m1 -= m1.innerVector(0) ); VERIFY_RAISES_ASSERT( refM1 -= m1.innerVector(0) ); VERIFY_RAISES_ASSERT( refM1 += m1.innerVector(0) ); - m1 = m4; refM1 = refM4; } + m1 = m4; refM1 = refM4; // test aliasing VERIFY_IS_APPROX((m1 = -m1), (refM1 = -refM1)); @@ -385,7 +413,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re m.setFromTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>()); VERIFY_IS_APPROX(m, refMat_prod); -#if (defined(__cplusplus) && __cplusplus >= 201103L) +#if (EIGEN_COMP_CXXVER >= 11) m.setFromTriplets(triplets.begin(), triplets.end(), [] (Scalar,Scalar b) { return b; }); VERIFY_IS_APPROX(m, refMat_last); #endif @@ -518,7 +546,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re { DenseVector d = DenseVector::Random(rows); DenseMatrix refMat2 = d.asDiagonal(); - SparseMatrixType m2(rows, rows); + SparseMatrixType m2; m2 = d.asDiagonal(); VERIFY_IS_APPROX(m2, refMat2); SparseMatrixType m3(d.asDiagonal()); @@ -526,6 +554,28 @@ template<typename SparseMatrixType> 
void sparse_basic(const SparseMatrixType& re refMat2 += d.asDiagonal(); m2 += d.asDiagonal(); VERIFY_IS_APPROX(m2, refMat2); + m2.setZero(); m2 += d.asDiagonal(); + refMat2.setZero(); refMat2 += d.asDiagonal(); + VERIFY_IS_APPROX(m2, refMat2); + m2.setZero(); m2 -= d.asDiagonal(); + refMat2.setZero(); refMat2 -= d.asDiagonal(); + VERIFY_IS_APPROX(m2, refMat2); + + initSparse<Scalar>(density, refMat2, m2); + m2.makeCompressed(); + m2 += d.asDiagonal(); + refMat2 += d.asDiagonal(); + VERIFY_IS_APPROX(m2, refMat2); + + initSparse<Scalar>(density, refMat2, m2); + m2.makeCompressed(); + VectorXi res(rows); + for(Index i=0; i<rows; ++i) + res(i) = internal::random<int>(0,3); + m2.reserve(res); + m2 -= d.asDiagonal(); + refMat2 -= d.asDiagonal(); + VERIFY_IS_APPROX(m2, refMat2); } // test conservative resize @@ -537,30 +587,38 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re inc.push_back(std::pair<StorageIndex,StorageIndex>(3,2)); inc.push_back(std::pair<StorageIndex,StorageIndex>(3,0)); inc.push_back(std::pair<StorageIndex,StorageIndex>(0,3)); - + inc.push_back(std::pair<StorageIndex,StorageIndex>(0,-1)); + inc.push_back(std::pair<StorageIndex,StorageIndex>(-1,0)); + inc.push_back(std::pair<StorageIndex,StorageIndex>(-1,-1)); + for(size_t i = 0; i< inc.size(); i++) { StorageIndex incRows = inc[i].first; StorageIndex incCols = inc[i].second; SparseMatrixType m1(rows, cols); DenseMatrix refMat1 = DenseMatrix::Zero(rows, cols); initSparse<Scalar>(density, refMat1, m1); - + + SparseMatrixType m2 = m1; + m2.makeCompressed(); + m1.conservativeResize(rows+incRows, cols+incCols); + m2.conservativeResize(rows+incRows, cols+incCols); refMat1.conservativeResize(rows+incRows, cols+incCols); if (incRows > 0) refMat1.bottomRows(incRows).setZero(); if (incCols > 0) refMat1.rightCols(incCols).setZero(); - + VERIFY_IS_APPROX(m1, refMat1); - + VERIFY_IS_APPROX(m2, refMat1); + // Insert new values if (incRows > 0) m1.insert(m1.rows()-1, 0) = 
refMat1(refMat1.rows()-1, 0) = 1; if (incCols > 0) m1.insert(0, m1.cols()-1) = refMat1(0, refMat1.cols()-1) = 1; - + VERIFY_IS_APPROX(m1, refMat1); - - + + } } @@ -612,6 +670,14 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re iters[0] = IteratorType(m2,0); iters[1] = IteratorType(m2,m2.outerSize()-1); } + + // test reserve with empty rows/columns + { + SparseMatrixType m1(0,cols); + m1.reserve(ArrayXi::Constant(m1.outerSize(),1)); + SparseMatrixType m2(rows,0); + m2.reserve(ArrayXi::Constant(m2.outerSize(),1)); + } } @@ -622,7 +688,7 @@ void big_sparse_triplet(Index rows, Index cols, double density) { typedef Triplet<Scalar,Index> TripletType; std::vector<TripletType> triplets; double nelements = density * rows*cols; - VERIFY(nelements>=0 && nelements < NumTraits<StorageIndex>::highest()); + VERIFY(nelements>=0 && nelements < static_cast<double>(NumTraits<StorageIndex>::highest())); Index ntriplets = Index(nelements); triplets.reserve(ntriplets); Scalar sum = Scalar(0); @@ -630,7 +696,8 @@ void big_sparse_triplet(Index rows, Index cols, double density) { { Index r = internal::random<Index>(0,rows-1); Index c = internal::random<Index>(0,cols-1); - Scalar v = internal::random<Scalar>(); + // use positive values to prevent numerical cancellation errors in sum + Scalar v = numext::abs(internal::random<Scalar>()); triplets.push_back(TripletType(r,c,v)); sum += v; } @@ -640,9 +707,26 @@ void big_sparse_triplet(Index rows, Index cols, double density) { VERIFY_IS_APPROX(sum, m.sum()); } +template<int> +void bug1105() +{ + // Regression test for bug 1105 + int n = Eigen::internal::random<int>(200,600); + SparseMatrix<std::complex<double>,0, long> mat(n, n); + std::complex<double> val; + + for(int i=0; i<n; ++i) + { + mat.coeffRef(i, i%(n/10)) = val; + VERIFY(mat.data().allocatedSize()<20*n); + } +} + +#ifndef EIGEN_SPARSE_TEST_INCLUDED_FROM_SPARSE_EXTRA -void test_sparse_basic() +EIGEN_DECLARE_TEST(sparse_basic) { + 
g_dense_op_sparse_count = 0; // Suppresses compiler warning. for(int i = 0; i < g_repeat; i++) { int r = Eigen::internal::random<int>(1,200), c = Eigen::internal::random<int>(1,200); if(Eigen::internal::random<int>(0,4) == 0) { @@ -671,18 +755,6 @@ void test_sparse_basic() CALL_SUBTEST_3((big_sparse_triplet<SparseMatrix<float, RowMajor, int> >(10000, 10000, 0.125))); CALL_SUBTEST_4((big_sparse_triplet<SparseMatrix<double, ColMajor, long int> >(10000, 10000, 0.125))); - // Regression test for bug 1105 -#ifdef EIGEN_TEST_PART_7 - { - int n = Eigen::internal::random<int>(200,600); - SparseMatrix<std::complex<double>,0, long> mat(n, n); - std::complex<double> val; - - for(int i=0; i<n; ++i) - { - mat.coeffRef(i, i%(n/10)) = val; - VERIFY(mat.data().allocatedSize()<20*n); - } - } -#endif + CALL_SUBTEST_7( bug1105<0>() ); } +#endif diff --git a/test/sparse_block.cpp b/test/sparse_block.cpp index 2a0b3b617..b4905b053 100644 --- a/test/sparse_block.cpp +++ b/test/sparse_block.cpp @@ -8,6 +8,7 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "sparse.h" +#include "AnnoyingScalar.h" template<typename T> typename Eigen::internal::enable_if<(T::Flags&RowMajorBit)==RowMajorBit, typename T::RowXpr>::type @@ -31,6 +32,7 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re const Index outer = ref.outerSize(); typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::RealScalar RealScalar; typedef typename SparseMatrixType::StorageIndex StorageIndex; double density = (std::max)(8./(rows*cols), 0.01); @@ -164,14 +166,14 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re { VERIFY(j==numext::real(m3.innerVector(j).nonZeros())); if(j>0) - VERIFY(j==numext::real(m3.innerVector(j).lastCoeff())); + VERIFY(RealScalar(j)==numext::real(m3.innerVector(j).lastCoeff())); } m3.makeCompressed(); for(Index j=0; j<(std::min)(outer, inner); ++j) { VERIFY(j==numext::real(m3.innerVector(j).nonZeros())); if(j>0) - VERIFY(j==numext::real(m3.innerVector(j).lastCoeff())); + VERIFY(RealScalar(j)==numext::real(m3.innerVector(j).lastCoeff())); } VERIFY(m3.innerVector(j0).nonZeros() == m3.transpose().innerVector(j0).nonZeros()); @@ -288,7 +290,7 @@ template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& re } } -void test_sparse_block() +EIGEN_DECLARE_TEST(sparse_block) { for(int i = 0; i < g_repeat; i++) { int r = Eigen::internal::random<int>(1,200), c = Eigen::internal::random<int>(1,200); @@ -313,5 +315,9 @@ void test_sparse_block() CALL_SUBTEST_4(( sparse_block(SparseMatrix<double,ColMajor,short int>(short(r), short(c))) )); CALL_SUBTEST_4(( sparse_block(SparseMatrix<double,RowMajor,short int>(short(r), short(c))) )); +#ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW + AnnoyingScalar::dont_throw = true; +#endif + CALL_SUBTEST_5(( sparse_block(SparseMatrix<AnnoyingScalar>(r,c)) )); } } diff --git a/test/sparse_permutations.cpp b/test/sparse_permutations.cpp index b82cceff8..e93493c39 100644 --- 
a/test/sparse_permutations.cpp +++ b/test/sparse_permutations.cpp @@ -220,7 +220,7 @@ template<typename Scalar> void sparse_permutations_all(int size) CALL_SUBTEST(( sparse_permutations<RowMajor>(SparseMatrix<Scalar, RowMajor>(size,size)) )); } -void test_sparse_permutations() +EIGEN_DECLARE_TEST(sparse_permutations) { for(int i = 0; i < g_repeat; i++) { int s = Eigen::internal::random<int>(1,50); diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp index 197586741..6e85f6914 100644 --- a/test/sparse_product.cpp +++ b/test/sparse_product.cpp @@ -7,6 +7,12 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +#if defined(_MSC_VER) && (_MSC_VER==1800) +// This unit test takes forever to compile in Release mode with MSVC 2013, +// multiple hours. So let's switch off optimization for this one. +#pragma optimize("",off) +#endif + static long int nb_temporaries; inline void on_temporary_creation() { @@ -94,13 +100,15 @@ template<typename SparseMatrixType> void sparse_product() VERIFY_IS_APPROX(m4=(m2t.transpose()*m3t.transpose()).pruned(0), refMat4=refMat2t.transpose()*refMat3t.transpose()); VERIFY_IS_APPROX(m4=(m2*m3t.transpose()).pruned(0), refMat4=refMat2*refMat3t.transpose()); +#ifndef EIGEN_SPARSE_PRODUCT_IGNORE_TEMPORARY_COUNT // make sure the right product implementation is called: if((!SparseMatrixType::IsRowMajor) && m2.rows()<=m3.cols()) { - VERIFY_EVALUATION_COUNT(m4 = m2*m3, 3); // 1 temp for the result + 2 for transposing and get a sorted result. + VERIFY_EVALUATION_COUNT(m4 = m2*m3, 2); // 2 for transposing and get a sorted result. 
VERIFY_EVALUATION_COUNT(m4 = (m2*m3).pruned(0), 1); VERIFY_EVALUATION_COUNT(m4 = (m2*m3).eval().pruned(0), 4); } +#endif // and that pruning is effective: { @@ -145,7 +153,7 @@ template<typename SparseMatrixType> void sparse_product() VERIFY_IS_APPROX(dm4.noalias()-=m2*refMat3, refMat4-=refMat2*refMat3); VERIFY_IS_APPROX(dm4=m2*(refMat3+refMat3), refMat4=refMat2*(refMat3+refMat3)); VERIFY_IS_APPROX(dm4=m2t.transpose()*(refMat3+refMat5)*0.5, refMat4=refMat2t.transpose()*(refMat3+refMat5)*0.5); - + // sparse * dense vector VERIFY_IS_APPROX(dm4.col(0)=m2*refMat3.col(0), refMat4.col(0)=refMat2*refMat3.col(0)); VERIFY_IS_APPROX(dm4.col(0)=m2*refMat3t.transpose().col(0), refMat4.col(0)=refMat2*refMat3t.transpose().col(0)); @@ -176,7 +184,7 @@ template<typename SparseMatrixType> void sparse_product() VERIFY_IS_APPROX( m4=m2.middleCols(c,1)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose()); VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count()); VERIFY_IS_APPROX(dm4=m2.col(c)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose()); - + VERIFY_IS_APPROX(m4=dm5.col(c1)*m2.col(c).transpose(), refMat4=dm5.col(c1)*refMat2.col(c).transpose()); VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count()); VERIFY_IS_APPROX(m4=dm5.col(c1)*m2.middleCols(c,1).transpose(), refMat4=dm5.col(c1)*refMat2.col(c).transpose()); @@ -205,23 +213,23 @@ template<typename SparseMatrixType> void sparse_product() } VERIFY_IS_APPROX(m6=m6*m6, refMat6=refMat6*refMat6); - + // sparse matrix * sparse vector ColSpVector cv0(cols), cv1; DenseVector dcv0(cols), dcv1; initSparse(2*density,dcv0, cv0); - + RowSpVector rv0(depth), rv1; RowDenseVector drv0(depth), drv1(rv1); initSparse(2*density,drv0, rv0); - VERIFY_IS_APPROX(cv1=m3*cv0, dcv1=refMat3*dcv0); + VERIFY_IS_APPROX(cv1=m3*cv0, dcv1=refMat3*dcv0); VERIFY_IS_APPROX(rv1=rv0*m3, drv1=drv0*refMat3); VERIFY_IS_APPROX(cv1=m3t.adjoint()*cv0, dcv1=refMat3t.adjoint()*dcv0); VERIFY_IS_APPROX(cv1=rv0*m3, 
dcv1=drv0*refMat3); VERIFY_IS_APPROX(rv1=m3*cv0, drv1=refMat3*dcv0); } - + // test matrix - diagonal product { DenseMatrix refM2 = DenseMatrix::Zero(rows, cols); @@ -237,7 +245,7 @@ template<typename SparseMatrixType> void sparse_product() VERIFY_IS_APPROX(m3=m2.transpose()*d2, refM3=refM2.transpose()*d2); VERIFY_IS_APPROX(m3=d2*m2, refM3=d2*refM2); VERIFY_IS_APPROX(m3=d1*m2.transpose(), refM3=d1*refM2.transpose()); - + // also check with a SparseWrapper: DenseVector v1 = DenseVector::Random(cols); DenseVector v2 = DenseVector::Random(rows); @@ -246,12 +254,12 @@ template<typename SparseMatrixType> void sparse_product() VERIFY_IS_APPROX(m3=m2.transpose()*v2.asDiagonal(), refM3=refM2.transpose()*v2.asDiagonal()); VERIFY_IS_APPROX(m3=v2.asDiagonal()*m2, refM3=v2.asDiagonal()*refM2); VERIFY_IS_APPROX(m3=v1.asDiagonal()*m2.transpose(), refM3=v1.asDiagonal()*refM2.transpose()); - + VERIFY_IS_APPROX(m3=v2.asDiagonal()*m2*v1.asDiagonal(), refM3=v2.asDiagonal()*refM2*v1.asDiagonal()); VERIFY_IS_APPROX(v2=m2*v1.asDiagonal()*v1, refM2*v1.asDiagonal()*v1); VERIFY_IS_APPROX(v3=v2.asDiagonal()*m2*v1, v2.asDiagonal()*refM2*v1); - + // evaluate to a dense matrix to check the .row() and .col() iterator functions VERIFY_IS_APPROX(d3=m2*d1, refM3=refM2*d1); VERIFY_IS_APPROX(d3=m2.transpose()*d2, refM3=refM2.transpose()*d2); @@ -304,20 +312,20 @@ template<typename SparseMatrixType> void sparse_product() VERIFY_IS_APPROX(x.noalias()+=mUp.template selfadjointView<Upper>()*b, refX+=refS*b); VERIFY_IS_APPROX(x.noalias()-=mLo.template selfadjointView<Lower>()*b, refX-=refS*b); VERIFY_IS_APPROX(x.noalias()+=mS.template selfadjointView<Upper|Lower>()*b, refX+=refS*b); - + // sparse selfadjointView with sparse matrices SparseMatrixType mSres(rows,rows); VERIFY_IS_APPROX(mSres = mLo.template selfadjointView<Lower>()*mS, refX = refLo.template selfadjointView<Lower>()*refS); VERIFY_IS_APPROX(mSres = mS * mLo.template selfadjointView<Lower>(), refX = refS * refLo.template 
selfadjointView<Lower>()); - + // sparse triangularView with dense matrices VERIFY_IS_APPROX(x=mA.template triangularView<Upper>()*b, refX=refA.template triangularView<Upper>()*b); VERIFY_IS_APPROX(x=mA.template triangularView<Lower>()*b, refX=refA.template triangularView<Lower>()*b); VERIFY_IS_APPROX(x=b*mA.template triangularView<Upper>(), refX=b*refA.template triangularView<Upper>()); VERIFY_IS_APPROX(x=b*mA.template triangularView<Lower>(), refX=b*refA.template triangularView<Lower>()); - + // sparse triangularView with sparse matrices VERIFY_IS_APPROX(mSres = mA.template triangularView<Lower>()*mS, refX = refA.template triangularView<Lower>()*refS); VERIFY_IS_APPROX(mSres = mS * mA.template triangularView<Lower>(), refX = refS * refA.template triangularView<Lower>()); @@ -362,16 +370,98 @@ void bug_942() Vector d(1); d[0] = 2; - + double res = 2; - + VERIFY_IS_APPROX( ( cmA*d.asDiagonal() ).eval().coeff(0,0), res ); VERIFY_IS_APPROX( ( d.asDiagonal()*rmA ).eval().coeff(0,0), res ); VERIFY_IS_APPROX( ( rmA*d.asDiagonal() ).eval().coeff(0,0), res ); VERIFY_IS_APPROX( ( d.asDiagonal()*cmA ).eval().coeff(0,0), res ); } -void test_sparse_product() +template<typename Real> +void test_mixing_types() +{ + typedef std::complex<Real> Cplx; + typedef SparseMatrix<Real> SpMatReal; + typedef SparseMatrix<Cplx> SpMatCplx; + typedef SparseMatrix<Cplx,RowMajor> SpRowMatCplx; + typedef Matrix<Real,Dynamic,Dynamic> DenseMatReal; + typedef Matrix<Cplx,Dynamic,Dynamic> DenseMatCplx; + + Index n = internal::random<Index>(1,100); + double density = (std::max)(8./(n*n), 0.2); + + SpMatReal sR1(n,n); + SpMatCplx sC1(n,n), sC2(n,n), sC3(n,n); + SpRowMatCplx sCR(n,n); + DenseMatReal dR1(n,n); + DenseMatCplx dC1(n,n), dC2(n,n), dC3(n,n); + + initSparse<Real>(density, dR1, sR1); + initSparse<Cplx>(density, dC1, sC1); + initSparse<Cplx>(density, dC2, sC2); + + VERIFY_IS_APPROX( sC2 = (sR1 * sC1), dC3 = dR1.template cast<Cplx>() * dC1 ); + VERIFY_IS_APPROX( sC2 = (sC1 * sR1), dC3 = dC1 * 
dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1), dC3 = dR1.template cast<Cplx>().transpose() * dC1 ); + VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1), dC3 = dC1.transpose() * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sC2 = (sR1 * sC1.transpose()), dC3 = dR1.template cast<Cplx>() * dC1.transpose() ); + VERIFY_IS_APPROX( sC2 = (sC1 * sR1.transpose()), dC3 = dC1 * dR1.template cast<Cplx>().transpose() ); + VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1.transpose()), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() ); + VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1.transpose()), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() ); + + VERIFY_IS_APPROX( sCR = (sR1 * sC1), dC3 = dR1.template cast<Cplx>() * dC1 ); + VERIFY_IS_APPROX( sCR = (sC1 * sR1), dC3 = dC1 * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1), dC3 = dR1.template cast<Cplx>().transpose() * dC1 ); + VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1), dC3 = dC1.transpose() * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sCR = (sR1 * sC1.transpose()), dC3 = dR1.template cast<Cplx>() * dC1.transpose() ); + VERIFY_IS_APPROX( sCR = (sC1 * sR1.transpose()), dC3 = dC1 * dR1.template cast<Cplx>().transpose() ); + VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1.transpose()), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() ); + VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1.transpose()), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() ); + + + VERIFY_IS_APPROX( sC2 = (sR1 * sC1).pruned(), dC3 = dR1.template cast<Cplx>() * dC1 ); + VERIFY_IS_APPROX( sC2 = (sC1 * sR1).pruned(), dC3 = dC1 * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1 ); + VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sC2 = (sR1 * 
sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>() * dC1.transpose() ); + VERIFY_IS_APPROX( sC2 = (sC1 * sR1.transpose()).pruned(), dC3 = dC1 * dR1.template cast<Cplx>().transpose() ); + VERIFY_IS_APPROX( sC2 = (sR1.transpose() * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() ); + VERIFY_IS_APPROX( sC2 = (sC1.transpose() * sR1.transpose()).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() ); + + VERIFY_IS_APPROX( sCR = (sR1 * sC1).pruned(), dC3 = dR1.template cast<Cplx>() * dC1 ); + VERIFY_IS_APPROX( sCR = (sC1 * sR1).pruned(), dC3 = dC1 * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1 ); + VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( sCR = (sR1 * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>() * dC1.transpose() ); + VERIFY_IS_APPROX( sCR = (sC1 * sR1.transpose()).pruned(), dC3 = dC1 * dR1.template cast<Cplx>().transpose() ); + VERIFY_IS_APPROX( sCR = (sR1.transpose() * sC1.transpose()).pruned(), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() ); + VERIFY_IS_APPROX( sCR = (sC1.transpose() * sR1.transpose()).pruned(), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() ); + + + VERIFY_IS_APPROX( dC2 = (sR1 * sC1), dC3 = dR1.template cast<Cplx>() * dC1 ); + VERIFY_IS_APPROX( dC2 = (sC1 * sR1), dC3 = dC1 * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( dC2 = (sR1.transpose() * sC1), dC3 = dR1.template cast<Cplx>().transpose() * dC1 ); + VERIFY_IS_APPROX( dC2 = (sC1.transpose() * sR1), dC3 = dC1.transpose() * dR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( dC2 = (sR1 * sC1.transpose()), dC3 = dR1.template cast<Cplx>() * dC1.transpose() ); + VERIFY_IS_APPROX( dC2 = (sC1 * sR1.transpose()), dC3 = dC1 * dR1.template cast<Cplx>().transpose() ); + VERIFY_IS_APPROX( dC2 = (sR1.transpose() * 
sC1.transpose()), dC3 = dR1.template cast<Cplx>().transpose() * dC1.transpose() ); + VERIFY_IS_APPROX( dC2 = (sC1.transpose() * sR1.transpose()), dC3 = dC1.transpose() * dR1.template cast<Cplx>().transpose() ); + + + VERIFY_IS_APPROX( dC2 = dR1 * sC1, dC3 = dR1.template cast<Cplx>() * sC1 ); + VERIFY_IS_APPROX( dC2 = sR1 * dC1, dC3 = sR1.template cast<Cplx>() * dC1 ); + VERIFY_IS_APPROX( dC2 = dC1 * sR1, dC3 = dC1 * sR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( dC2 = sC1 * dR1, dC3 = sC1 * dR1.template cast<Cplx>() ); + + VERIFY_IS_APPROX( dC2 = dR1.row(0) * sC1, dC3 = dR1.template cast<Cplx>().row(0) * sC1 ); + VERIFY_IS_APPROX( dC2 = sR1 * dC1.col(0), dC3 = sR1.template cast<Cplx>() * dC1.col(0) ); + VERIFY_IS_APPROX( dC2 = dC1.row(0) * sR1, dC3 = dC1.row(0) * sR1.template cast<Cplx>() ); + VERIFY_IS_APPROX( dC2 = sC1 * dR1.col(0), dC3 = sC1 * dR1.template cast<Cplx>().col(0) ); +} + +EIGEN_DECLARE_TEST(sparse_product) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( (sparse_product<SparseMatrix<double,ColMajor> >()) ); @@ -381,5 +471,7 @@ void test_sparse_product() CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, RowMajor > >()) ); CALL_SUBTEST_3( (sparse_product<SparseMatrix<float,ColMajor,long int> >()) ); CALL_SUBTEST_4( (sparse_product_regression_test<SparseMatrix<double,RowMajor>, Matrix<double, Dynamic, Dynamic, RowMajor> >()) ); + + CALL_SUBTEST_5( (test_mixing_types<float>()) ); } } diff --git a/test/sparse_ref.cpp b/test/sparse_ref.cpp index 5e9607234..12b6f8a9d 100644 --- a/test/sparse_ref.cpp +++ b/test/sparse_ref.cpp @@ -126,7 +126,7 @@ void call_ref() VERIFY_EVALUATION_COUNT( call_ref_5(A.row(2), A.row(2).transpose()), 1); } -void test_sparse_ref() +EIGEN_DECLARE_TEST(sparse_ref) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( check_const_correctness(SparseMatrix<float>()) ); diff --git a/test/sparse_solver.h b/test/sparse_solver.h index 5145bc3eb..58927944b 100644 --- a/test/sparse_solver.h +++ b/test/sparse_solver.h 
@@ -9,6 +9,7 @@ #include "sparse.h" #include <Eigen/SparseCore> +#include <Eigen/SparseLU> #include <sstream> template<typename Solver, typename Rhs, typename Guess,typename Result> @@ -59,7 +60,11 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, x = solver.solve(b); if (solver.info() != Success) { - std::cerr << "WARNING | sparse solver testing: solving failed (" << typeid(Solver).name() << ")\n"; + std::cerr << "WARNING: sparse solver testing: solving failed (" << typeid(Solver).name() << ")\n"; + // dump call stack: + g_test_level++; + VERIFY(solver.info() == Success); + g_test_level--; return; } VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!"); @@ -67,7 +72,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, x.setZero(); solve_with_guess(solver, b, x, x); - VERIFY(solver.info() == Success && "solving failed when using analyzePattern/factorize API"); + VERIFY(solver.info() == Success && "solving failed when using solve_with_guess API"); VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!"); VERIFY(x.isApprox(refX,test_precision<Scalar>())); @@ -140,6 +145,136 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, } } +// specialization of generic check_sparse_solving for SuperLU in order to also test adjoint and transpose solves +template<typename Scalar, typename Rhs, typename DenseMat, typename DenseRhs> +void check_sparse_solving(Eigen::SparseLU<Eigen::SparseMatrix<Scalar> >& solver, const typename Eigen::SparseMatrix<Scalar>& A, const Rhs& b, const DenseMat& dA, const DenseRhs& db) +{ + typedef typename Eigen::SparseMatrix<Scalar> Mat; + typedef typename Mat::StorageIndex StorageIndex; + typedef typename Eigen::SparseLU<Eigen::SparseMatrix<Scalar> > Solver; + + // reference solutions computed by dense QR solver + DenseRhs refX1 = dA.householderQr().solve(db); // solution of A x = db + DenseRhs refX2 
= dA.transpose().householderQr().solve(db); // solution of A^T * x = db (use transposed matrix A^T) + DenseRhs refX3 = dA.adjoint().householderQr().solve(db); // solution of A^* * x = db (use adjoint matrix A^*) + + + { + Rhs x1(A.cols(), b.cols()); + Rhs x2(A.cols(), b.cols()); + Rhs x3(A.cols(), b.cols()); + Rhs oldb = b; + + solver.compute(A); + if (solver.info() != Success) + { + std::cerr << "ERROR | sparse solver testing, factorization failed (" << typeid(Solver).name() << ")\n"; + VERIFY(solver.info() == Success); + } + x1 = solver.solve(b); + if (solver.info() != Success) + { + std::cerr << "WARNING | sparse solver testing: solving failed (" << typeid(Solver).name() << ")\n"; + return; + } + VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x1.isApprox(refX1,test_precision<Scalar>())); + + // test solve with transposed + x2 = solver.transpose().solve(b); + VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x2.isApprox(refX2,test_precision<Scalar>())); + + + // test solve with adjoint + //solver.template _solve_impl_transposed<true>(b, x3); + x3 = solver.adjoint().solve(b); + VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x3.isApprox(refX3,test_precision<Scalar>())); + + x1.setZero(); + solve_with_guess(solver, b, x1, x1); + VERIFY(solver.info() == Success && "solving failed when using analyzePattern/factorize API"); + VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x1.isApprox(refX1,test_precision<Scalar>())); + + x1.setZero(); + x2.setZero(); + x3.setZero(); + // test the analyze/factorize API + solver.analyzePattern(A); + solver.factorize(A); + VERIFY(solver.info() == Success && "factorization failed when using analyzePattern/factorize API"); + x1 = solver.solve(b); + x2 = solver.transpose().solve(b); + x3 = solver.adjoint().solve(b); + + VERIFY(solver.info() == 
Success && "solving failed when using analyzePattern/factorize API"); + VERIFY(oldb.isApprox(b,0.0) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x1.isApprox(refX1,test_precision<Scalar>())); + VERIFY(x2.isApprox(refX2,test_precision<Scalar>())); + VERIFY(x3.isApprox(refX3,test_precision<Scalar>())); + + x1.setZero(); + // test with Map + MappedSparseMatrix<Scalar,Mat::Options,StorageIndex> Am(A.rows(), A.cols(), A.nonZeros(), const_cast<StorageIndex*>(A.outerIndexPtr()), const_cast<StorageIndex*>(A.innerIndexPtr()), const_cast<Scalar*>(A.valuePtr())); + solver.compute(Am); + VERIFY(solver.info() == Success && "factorization failed when using Map"); + DenseRhs dx(refX1); + dx.setZero(); + Map<DenseRhs> xm(dx.data(), dx.rows(), dx.cols()); + Map<const DenseRhs> bm(db.data(), db.rows(), db.cols()); + xm = solver.solve(bm); + VERIFY(solver.info() == Success && "solving failed when using Map"); + VERIFY(oldb.isApprox(bm,0.0) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(xm.isApprox(refX1,test_precision<Scalar>())); + } + + // if not too large, do some extra check: + if(A.rows()<2000) + { + // test initialization ctor + { + Rhs x(b.rows(), b.cols()); + Solver solver2(A); + VERIFY(solver2.info() == Success); + x = solver2.solve(b); + VERIFY(x.isApprox(refX1,test_precision<Scalar>())); + } + + // test dense Block as the result and rhs: + { + DenseRhs x(refX1.rows(), refX1.cols()); + DenseRhs oldb(db); + x.setZero(); + x.block(0,0,x.rows(),x.cols()) = solver.solve(db.block(0,0,db.rows(),db.cols())); + VERIFY(oldb.isApprox(db,0.0) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x.isApprox(refX1,test_precision<Scalar>())); + } + + // test uncompressed inputs + { + Mat A2 = A; + A2.reserve((ArrayXf::Random(A.outerSize())+2).template cast<typename Mat::StorageIndex>().eval()); + solver.compute(A2); + Rhs x = solver.solve(b); + VERIFY(x.isApprox(refX1,test_precision<Scalar>())); + } + + // test expression 
as input + { + solver.compute(0.5*(A+A)); + Rhs x = solver.solve(b); + VERIFY(x.isApprox(refX1,test_precision<Scalar>())); + + Solver solver2(0.5*(A+A)); + Rhs x2 = solver2.solve(b); + VERIFY(x2.isApprox(refX1,test_precision<Scalar>())); + } + } +} + + template<typename Solver, typename Rhs> void check_sparse_solving_real_cases(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const typename Solver::MatrixType& fullA, const Rhs& refX) { @@ -266,7 +401,7 @@ std::string solver_stats(const SparseSolverBase<Derived> &/*solver*/) } #endif -template<typename Solver> void check_sparse_spd_solving(Solver& solver, int maxSize = 300, int maxRealWorldSize = 100000) +template<typename Solver> void check_sparse_spd_solving(Solver& solver, int maxSize = (std::min)(300,EIGEN_TEST_MAX_SIZE), int maxRealWorldSize = 100000) { typedef typename Solver::MatrixType Mat; typedef typename Mat::Scalar Scalar; @@ -429,8 +564,7 @@ template<typename Solver> void check_sparse_square_solving(Solver& solver, int m // check only once if(i==0) { - b = DenseVector::Zero(size); - check_sparse_solving(solver, A, b, dA, b); + CALL_SUBTEST(b = DenseVector::Zero(size); check_sparse_solving(solver, A, b, dA, b)); } // regression test for Bug 792 (structurally rank deficient matrices): if(checkDeficient && size>1) { diff --git a/test/sparse_solvers.cpp b/test/sparse_solvers.cpp index 3a8873d43..3b7cd7788 100644 --- a/test/sparse_solvers.cpp +++ b/test/sparse_solvers.cpp @@ -98,10 +98,23 @@ template<typename Scalar> void sparse_solvers(int rows, int cols) initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, &zeroCoords, &nonzeroCoords); VERIFY_IS_APPROX(refMat2.template triangularView<Lower>().solve(vec2), m2.template triangularView<Lower>().solve(vec3)); + + // test empty triangular matrix + { + m2.resize(0,0); + refMatB.resize(0,refMatB.cols()); + DenseMatrix res = m2.template triangularView<Lower>().solve(refMatB); + VERIFY_IS_EQUAL(res.rows(),0); + 
VERIFY_IS_EQUAL(res.cols(),refMatB.cols()); + res = refMatB; + m2.template triangularView<Lower>().solveInPlace(res); + VERIFY_IS_EQUAL(res.rows(),0); + VERIFY_IS_EQUAL(res.cols(),refMatB.cols()); + } } } -void test_sparse_solvers() +EIGEN_DECLARE_TEST(sparse_solvers) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(sparse_solvers<double>(8, 8) ); diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp index b3e1dda25..35129278b 100644 --- a/test/sparse_vector.cpp +++ b/test/sparse_vector.cpp @@ -145,7 +145,7 @@ template<typename Scalar,typename StorageIndex> void sparse_vector(int rows, int } -void test_sparse_vector() +EIGEN_DECLARE_TEST(sparse_vector) { for(int i = 0; i < g_repeat; i++) { int r = Eigen::internal::random<int>(1,500), c = Eigen::internal::random<int>(1,500); diff --git a/test/sparselu.cpp b/test/sparselu.cpp index bd000baf1..84cc6ebe5 100644 --- a/test/sparselu.cpp +++ b/test/sparselu.cpp @@ -36,7 +36,7 @@ template<typename T> void test_sparselu_T() check_sparse_square_determinant(sparselu_amd); } -void test_sparselu() +EIGEN_DECLARE_TEST(sparselu) { CALL_SUBTEST_1(test_sparselu_T<float>()); CALL_SUBTEST_2(test_sparselu_T<double>()); diff --git a/test/sparseqr.cpp b/test/sparseqr.cpp index e8605fd21..3576cc626 100644 --- a/test/sparseqr.cpp +++ b/test/sparseqr.cpp @@ -43,6 +43,7 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows template<typename Scalar> void test_sparseqr_scalar() { + typedef typename NumTraits<Scalar>::Real RealScalar; typedef SparseMatrix<Scalar,ColMajor> MatrixType; typedef Matrix<Scalar,Dynamic,Dynamic> DenseMat; typedef Matrix<Scalar,Dynamic,1> DenseVector; @@ -54,6 +55,28 @@ template<typename Scalar> void test_sparseqr_scalar() b = dA * DenseVector::Random(A.cols()); solver.compute(A); + + // Q should be MxM + VERIFY_IS_EQUAL(solver.matrixQ().rows(), A.rows()); + VERIFY_IS_EQUAL(solver.matrixQ().cols(), A.rows()); + + // R should be MxN + VERIFY_IS_EQUAL(solver.matrixR().rows(), 
A.rows()); + VERIFY_IS_EQUAL(solver.matrixR().cols(), A.cols()); + + // Q and R can be multiplied + DenseMat recoveredA = solver.matrixQ() + * DenseMat(solver.matrixR().template triangularView<Upper>()) + * solver.colsPermutation().transpose(); + VERIFY_IS_EQUAL(recoveredA.rows(), A.rows()); + VERIFY_IS_EQUAL(recoveredA.cols(), A.cols()); + + // and in the full rank case the original matrix is recovered + if (solver.rank() == A.cols()) + { + VERIFY_IS_APPROX(A, recoveredA); + } + if(internal::random<float>(0,1)>0.5f) solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern do not change. if (solver.info() != Success) @@ -69,14 +92,34 @@ template<typename Scalar> void test_sparseqr_scalar() exit(0); return; } - - VERIFY_IS_APPROX(A * x, b); - - //Compare with a dense QR solver + + // Compare with a dense QR solver ColPivHouseholderQR<DenseMat> dqr(dA); refX = dqr.solve(b); - VERIFY_IS_EQUAL(dqr.rank(), solver.rank()); + bool rank_deficient = A.cols()>A.rows() || dqr.rank()<A.cols(); + if(rank_deficient) + { + // rank deficient problem -> we might have to increase the threshold + // to get a correct solution. + RealScalar th = RealScalar(20)*dA.colwise().norm().maxCoeff()*(A.rows()+A.cols()) * NumTraits<RealScalar>::epsilon(); + for(Index k=0; (k<16) && !test_isApprox(A*x,b); ++k) + { + th *= RealScalar(10); + solver.setPivotThreshold(th); + solver.compute(A); + x = solver.solve(b); + } + } + + VERIFY_IS_APPROX(A * x, b); + + // For rank deficient problem, the estimated rank might + // be slightly off, so let's only raise a warning in such cases. 
+ if(rank_deficient) ++g_test_level; + VERIFY_IS_EQUAL(solver.rank(), dqr.rank()); + if(rank_deficient) --g_test_level; + if(solver.rank()==A.cols()) // full rank VERIFY_IS_APPROX(x, refX); // else @@ -95,7 +138,7 @@ template<typename Scalar> void test_sparseqr_scalar() dQ = solver.matrixQ(); VERIFY_IS_APPROX(Q, dQ); } -void test_sparseqr() +EIGEN_DECLARE_TEST(sparseqr) { for(int i=0; i<g_repeat; ++i) { diff --git a/test/special_numbers.cpp b/test/special_numbers.cpp index 2f1b704be..1e1a63631 100644 --- a/test/special_numbers.cpp +++ b/test/special_numbers.cpp @@ -49,7 +49,7 @@ template<typename Scalar> void special_numbers() VERIFY(!mboth.array().allFinite()); } -void test_special_numbers() +EIGEN_DECLARE_TEST(special_numbers) { for(int i = 0; i < 10*g_repeat; i++) { CALL_SUBTEST_1( special_numbers<float>() ); diff --git a/test/split_test_helper.h b/test/split_test_helper.h new file mode 100644 index 000000000..82e82aaef --- /dev/null +++ b/test/split_test_helper.h @@ -0,0 +1,5994 @@ +#if defined(EIGEN_TEST_PART_1) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_1(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_1(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_2) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_2(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_2(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_3) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_3(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_3(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_4) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_4(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_4(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_5) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_5(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_5(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_6) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_6(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_6(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_7) || 
defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_7(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_7(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_8) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_8(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_8(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_9) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_9(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_9(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_10) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_10(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_10(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_11) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_11(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_11(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_12) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_12(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_12(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_13) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_13(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_13(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_14) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_14(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_14(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_15) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_15(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_15(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_16) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_16(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_16(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_17) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_17(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_17(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_18) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_18(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_18(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_19) || defined(EIGEN_TEST_PART_ALL) +#define 
CALL_SUBTEST_19(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_19(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_20) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_20(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_20(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_21) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_21(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_21(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_22) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_22(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_22(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_23) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_23(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_23(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_24) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_24(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_24(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_25) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_25(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_25(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_26) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_26(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_26(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_27) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_27(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_27(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_28) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_28(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_28(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_29) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_29(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_29(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_30) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_30(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_30(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_31) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_31(FUNC) CALL_SUBTEST(FUNC) 
+#else +#define CALL_SUBTEST_31(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_32) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_32(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_32(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_33) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_33(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_33(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_34) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_34(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_34(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_35) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_35(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_35(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_36) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_36(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_36(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_37) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_37(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_37(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_38) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_38(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_38(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_39) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_39(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_39(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_40) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_40(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_40(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_41) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_41(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_41(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_42) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_42(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_42(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_43) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_43(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_43(FUNC) 
+#endif + +#if defined(EIGEN_TEST_PART_44) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_44(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_44(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_45) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_45(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_45(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_46) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_46(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_46(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_47) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_47(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_47(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_48) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_48(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_48(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_49) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_49(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_49(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_50) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_50(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_50(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_51) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_51(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_51(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_52) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_52(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_52(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_53) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_53(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_53(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_54) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_54(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_54(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_55) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_55(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_55(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_56) 
|| defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_56(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_56(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_57) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_57(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_57(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_58) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_58(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_58(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_59) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_59(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_59(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_60) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_60(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_60(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_61) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_61(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_61(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_62) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_62(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_62(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_63) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_63(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_63(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_64) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_64(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_64(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_65) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_65(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_65(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_66) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_66(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_66(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_67) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_67(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_67(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_68) || defined(EIGEN_TEST_PART_ALL) +#define 
CALL_SUBTEST_68(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_68(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_69) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_69(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_69(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_70) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_70(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_70(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_71) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_71(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_71(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_72) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_72(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_72(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_73) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_73(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_73(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_74) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_74(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_74(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_75) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_75(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_75(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_76) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_76(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_76(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_77) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_77(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_77(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_78) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_78(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_78(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_79) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_79(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_79(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_80) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_80(FUNC) CALL_SUBTEST(FUNC) 
+#else +#define CALL_SUBTEST_80(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_81) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_81(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_81(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_82) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_82(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_82(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_83) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_83(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_83(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_84) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_84(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_84(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_85) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_85(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_85(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_86) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_86(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_86(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_87) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_87(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_87(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_88) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_88(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_88(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_89) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_89(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_89(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_90) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_90(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_90(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_91) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_91(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_91(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_92) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_92(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_92(FUNC) 
+#endif + +#if defined(EIGEN_TEST_PART_93) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_93(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_93(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_94) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_94(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_94(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_95) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_95(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_95(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_96) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_96(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_96(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_97) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_97(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_97(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_98) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_98(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_98(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_99) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_99(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_99(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_100) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_100(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_100(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_101) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_101(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_101(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_102) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_102(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_102(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_103) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_103(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_103(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_104) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_104(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_104(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_105) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_105(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_105(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_106) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_106(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_106(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_107) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_107(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_107(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_108) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_108(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_108(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_109) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_109(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_109(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_110) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_110(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_110(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_111) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_111(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_111(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_112) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_112(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_112(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_113) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_113(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_113(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_114) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_114(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_114(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_115) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_115(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_115(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_116) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_116(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_116(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_117) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_117(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_117(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_118) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_118(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_118(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_119) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_119(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_119(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_120) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_120(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_120(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_121) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_121(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_121(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_122) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_122(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_122(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_123) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_123(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_123(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_124) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_124(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_124(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_125) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_125(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_125(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_126) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_126(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_126(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_127) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_127(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_127(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_128) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_128(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_128(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_129) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_129(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_129(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_130) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_130(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_130(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_131) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_131(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_131(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_132) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_132(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_132(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_133) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_133(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_133(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_134) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_134(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_134(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_135) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_135(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_135(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_136) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_136(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_136(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_137) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_137(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_137(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_138) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_138(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_138(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_139) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_139(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_139(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_140) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_140(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_140(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_141) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_141(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_141(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_142) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_142(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_142(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_143) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_143(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_143(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_144) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_144(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_144(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_145) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_145(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_145(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_146) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_146(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_146(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_147) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_147(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_147(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_148) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_148(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_148(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_149) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_149(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_149(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_150) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_150(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_150(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_151) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_151(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_151(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_152) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_152(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_152(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_153) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_153(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_153(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_154) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_154(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_154(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_155) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_155(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_155(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_156) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_156(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_156(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_157) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_157(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_157(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_158) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_158(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_158(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_159) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_159(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_159(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_160) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_160(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_160(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_161) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_161(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_161(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_162) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_162(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_162(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_163) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_163(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_163(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_164) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_164(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_164(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_165) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_165(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_165(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_166) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_166(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_166(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_167) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_167(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_167(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_168) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_168(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_168(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_169) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_169(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_169(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_170) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_170(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_170(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_171) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_171(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_171(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_172) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_172(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_172(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_173) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_173(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_173(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_174) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_174(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_174(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_175) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_175(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_175(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_176) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_176(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_176(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_177) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_177(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_177(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_178) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_178(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_178(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_179) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_179(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_179(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_180) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_180(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_180(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_181) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_181(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_181(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_182) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_182(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_182(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_183) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_183(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_183(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_184) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_184(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_184(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_185) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_185(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_185(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_186) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_186(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_186(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_187) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_187(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_187(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_188) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_188(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_188(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_189) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_189(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_189(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_190) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_190(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_190(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_191) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_191(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_191(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_192) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_192(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_192(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_193) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_193(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_193(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_194) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_194(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_194(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_195) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_195(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_195(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_196) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_196(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_196(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_197) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_197(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_197(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_198) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_198(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_198(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_199) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_199(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_199(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_200) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_200(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_200(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_201) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_201(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_201(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_202) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_202(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_202(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_203) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_203(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_203(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_204) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_204(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_204(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_205) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_205(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_205(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_206) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_206(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_206(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_207) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_207(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_207(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_208) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_208(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_208(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_209) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_209(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_209(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_210) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_210(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_210(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_211) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_211(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_211(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_212) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_212(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_212(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_213) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_213(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_213(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_214) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_214(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_214(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_215) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_215(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_215(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_216) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_216(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_216(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_217) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_217(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_217(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_218) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_218(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_218(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_219) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_219(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_219(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_220) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_220(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_220(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_221) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_221(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_221(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_222) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_222(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_222(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_223) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_223(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_223(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_224) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_224(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_224(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_225) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_225(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_225(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_226) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_226(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_226(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_227) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_227(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_227(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_228) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_228(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_228(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_229) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_229(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_229(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_230) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_230(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_230(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_231) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_231(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_231(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_232) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_232(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_232(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_233) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_233(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_233(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_234) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_234(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_234(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_235) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_235(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_235(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_236) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_236(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_236(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_237) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_237(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_237(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_238) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_238(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_238(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_239) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_239(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_239(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_240) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_240(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_240(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_241) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_241(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_241(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_242) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_242(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_242(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_243) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_243(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_243(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_244) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_244(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_244(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_245) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_245(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_245(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_246) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_246(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_246(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_247) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_247(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_247(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_248) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_248(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_248(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_249) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_249(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_249(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_250) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_250(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_250(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_251) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_251(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_251(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_252) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_252(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_252(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_253) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_253(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_253(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_254) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_254(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_254(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_255) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_255(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_255(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_256) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_256(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_256(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_257) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_257(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_257(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_258) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_258(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_258(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_259) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_259(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_259(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_260) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_260(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_260(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_261) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_261(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_261(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_262) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_262(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_262(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_263) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_263(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_263(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_264) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_264(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_264(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_265) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_265(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_265(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_266) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_266(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_266(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_267) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_267(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_267(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_268) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_268(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_268(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_269) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_269(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_269(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_270) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_270(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_270(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_271) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_271(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_271(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_272) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_272(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_272(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_273) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_273(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_273(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_274) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_274(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_274(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_275) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_275(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_275(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_276) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_276(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_276(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_277) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_277(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_277(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_278) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_278(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_278(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_279) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_279(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_279(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_280) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_280(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_280(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_281) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_281(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_281(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_282) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_282(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_282(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_283) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_283(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_283(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_284) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_284(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_284(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_285) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_285(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_285(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_286) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_286(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_286(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_287) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_287(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_287(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_288) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_288(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_288(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_289) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_289(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_289(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_290) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_290(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_290(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_291) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_291(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_291(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_292) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_292(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_292(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_293) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_293(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_293(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_294) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_294(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_294(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_295) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_295(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_295(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_296) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_296(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_296(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_297) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_297(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_297(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_298) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_298(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_298(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_299) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_299(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_299(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_300) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_300(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_300(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_301) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_301(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_301(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_302) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_302(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_302(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_303) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_303(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_303(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_304) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_304(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_304(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_305) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_305(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_305(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_306) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_306(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_306(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_307) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_307(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_307(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_308) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_308(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_308(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_309) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_309(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_309(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_310) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_310(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_310(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_311) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_311(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_311(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_312) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_312(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_312(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_313) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_313(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_313(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_314) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_314(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_314(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_315) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_315(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_315(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_316) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_316(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_316(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_317) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_317(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_317(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_318) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_318(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_318(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_319) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_319(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_319(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_320) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_320(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_320(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_321) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_321(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_321(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_322) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_322(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_322(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_323) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_323(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_323(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_324) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_324(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_324(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_325) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_325(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_325(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_326) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_326(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_326(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_327) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_327(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_327(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_328) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_328(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_328(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_329) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_329(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_329(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_330) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_330(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_330(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_331) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_331(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_331(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_332) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_332(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_332(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_333) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_333(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_333(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_334) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_334(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_334(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_335) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_335(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_335(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_336) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_336(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_336(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_337) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_337(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_337(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_338) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_338(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_338(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_339) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_339(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_339(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_340) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_340(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_340(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_341) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_341(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_341(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_342) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_342(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_342(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_343) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_343(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_343(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_344) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_344(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_344(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_345) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_345(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_345(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_346) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_346(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_346(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_347) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_347(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_347(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_348) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_348(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_348(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_349) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_349(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_349(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_350) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_350(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_350(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_351) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_351(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_351(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_352) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_352(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_352(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_353) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_353(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_353(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_354) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_354(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_354(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_355) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_355(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_355(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_356) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_356(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_356(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_357) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_357(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_357(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_358) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_358(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_358(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_359) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_359(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_359(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_360) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_360(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_360(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_361) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_361(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_361(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_362) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_362(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_362(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_363) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_363(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_363(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_364) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_364(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_364(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_365) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_365(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_365(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_366) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_366(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_366(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_367) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_367(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_367(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_368) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_368(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_368(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_369) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_369(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_369(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_370) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_370(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_370(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_371) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_371(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_371(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_372) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_372(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_372(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_373) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_373(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_373(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_374) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_374(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_374(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_375) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_375(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_375(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_376) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_376(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_376(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_377) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_377(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_377(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_378) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_378(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_378(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_379) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_379(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_379(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_380) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_380(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_380(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_381) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_381(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_381(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_382) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_382(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_382(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_383) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_383(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_383(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_384) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_384(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_384(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_385) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_385(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_385(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_386) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_386(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_386(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_387) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_387(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_387(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_388) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_388(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_388(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_389) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_389(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_389(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_390) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_390(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_390(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_391) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_391(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_391(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_392) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_392(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_392(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_393) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_393(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_393(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_394) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_394(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_394(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_395) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_395(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_395(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_396) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_396(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_396(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_397) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_397(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_397(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_398) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_398(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_398(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_399) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_399(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_399(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_400) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_400(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_400(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_401) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_401(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_401(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_402) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_402(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_402(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_403) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_403(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_403(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_404) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_404(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_404(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_405) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_405(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_405(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_406) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_406(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_406(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_407) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_407(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_407(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_408) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_408(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_408(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_409) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_409(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_409(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_410) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_410(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_410(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_411) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_411(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_411(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_412) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_412(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_412(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_413) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_413(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_413(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_414) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_414(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_414(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_415) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_415(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_415(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_416) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_416(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_416(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_417) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_417(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_417(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_418) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_418(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_418(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_419) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_419(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_419(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_420) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_420(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_420(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_421) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_421(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_421(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_422) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_422(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_422(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_423) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_423(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_423(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_424) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_424(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_424(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_425) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_425(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_425(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_426) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_426(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_426(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_427) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_427(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_427(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_428) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_428(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_428(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_429) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_429(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_429(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_430) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_430(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_430(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_431) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_431(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_431(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_432) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_432(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_432(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_433) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_433(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_433(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_434) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_434(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_434(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_435) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_435(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_435(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_436) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_436(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_436(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_437) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_437(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_437(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_438) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_438(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_438(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_439) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_439(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_439(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_440) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_440(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_440(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_441) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_441(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_441(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_442) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_442(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_442(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_443) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_443(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_443(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_444) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_444(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_444(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_445) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_445(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_445(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_446) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_446(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_446(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_447) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_447(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_447(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_448) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_448(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_448(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_449) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_449(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_449(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_450) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_450(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_450(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_451) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_451(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_451(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_452) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_452(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_452(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_453) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_453(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_453(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_454) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_454(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_454(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_455) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_455(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_455(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_456) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_456(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_456(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_457) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_457(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_457(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_458) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_458(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_458(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_459) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_459(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_459(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_460) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_460(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_460(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_461) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_461(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_461(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_462) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_462(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_462(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_463) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_463(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_463(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_464) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_464(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_464(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_465) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_465(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_465(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_466) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_466(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_466(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_467) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_467(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_467(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_468) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_468(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_468(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_469) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_469(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_469(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_470) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_470(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_470(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_471) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_471(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_471(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_472) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_472(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_472(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_473) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_473(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_473(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_474) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_474(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_474(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_475) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_475(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_475(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_476) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_476(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_476(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_477) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_477(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_477(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_478) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_478(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_478(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_479) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_479(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_479(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_480) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_480(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_480(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_481) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_481(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_481(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_482) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_482(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_482(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_483) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_483(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_483(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_484) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_484(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_484(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_485) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_485(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_485(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_486) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_486(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_486(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_487) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_487(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_487(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_488) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_488(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_488(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_489) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_489(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_489(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_490) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_490(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_490(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_491) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_491(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_491(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_492) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_492(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_492(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_493) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_493(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_493(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_494) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_494(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_494(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_495) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_495(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_495(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_496) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_496(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_496(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_497) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_497(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_497(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_498) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_498(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_498(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_499) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_499(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_499(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_500) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_500(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_500(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_501) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_501(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_501(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_502) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_502(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_502(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_503) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_503(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_503(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_504) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_504(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_504(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_505) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_505(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_505(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_506) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_506(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_506(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_507) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_507(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_507(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_508) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_508(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_508(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_509) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_509(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_509(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_510) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_510(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_510(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_511) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_511(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_511(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_512) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_512(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_512(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_513) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_513(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_513(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_514) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_514(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_514(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_515) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_515(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_515(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_516) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_516(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_516(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_517) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_517(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_517(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_518) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_518(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_518(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_519) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_519(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_519(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_520) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_520(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_520(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_521) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_521(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_521(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_522) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_522(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_522(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_523) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_523(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_523(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_524) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_524(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_524(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_525) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_525(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_525(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_526) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_526(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_526(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_527) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_527(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_527(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_528) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_528(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_528(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_529) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_529(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_529(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_530) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_530(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_530(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_531) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_531(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_531(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_532) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_532(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_532(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_533) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_533(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_533(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_534) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_534(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_534(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_535) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_535(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_535(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_536) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_536(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_536(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_537) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_537(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_537(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_538) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_538(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_538(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_539) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_539(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_539(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_540) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_540(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_540(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_541) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_541(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_541(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_542) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_542(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_542(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_543) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_543(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_543(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_544) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_544(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_544(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_545) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_545(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_545(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_546) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_546(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_546(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_547) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_547(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_547(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_548) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_548(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_548(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_549) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_549(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_549(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_550) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_550(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_550(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_551) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_551(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_551(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_552) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_552(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_552(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_553) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_553(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_553(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_554) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_554(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_554(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_555) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_555(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_555(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_556) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_556(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_556(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_557) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_557(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_557(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_558) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_558(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_558(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_559) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_559(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_559(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_560) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_560(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_560(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_561) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_561(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_561(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_562) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_562(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_562(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_563) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_563(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_563(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_564) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_564(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_564(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_565) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_565(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_565(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_566) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_566(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_566(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_567) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_567(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_567(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_568) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_568(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_568(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_569) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_569(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_569(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_570) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_570(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_570(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_571) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_571(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_571(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_572) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_572(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_572(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_573) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_573(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_573(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_574) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_574(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_574(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_575) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_575(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_575(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_576) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_576(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_576(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_577) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_577(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_577(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_578) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_578(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_578(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_579) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_579(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_579(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_580) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_580(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_580(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_581) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_581(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_581(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_582) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_582(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_582(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_583) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_583(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_583(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_584) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_584(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_584(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_585) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_585(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_585(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_586) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_586(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_586(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_587) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_587(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_587(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_588) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_588(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_588(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_589) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_589(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_589(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_590) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_590(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_590(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_591) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_591(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_591(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_592) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_592(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_592(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_593) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_593(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_593(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_594) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_594(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_594(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_595) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_595(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_595(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_596) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_596(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_596(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_597) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_597(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_597(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_598) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_598(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_598(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_599) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_599(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_599(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_600) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_600(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_600(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_601) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_601(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_601(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_602) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_602(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_602(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_603) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_603(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_603(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_604) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_604(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_604(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_605) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_605(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_605(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_606) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_606(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_606(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_607) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_607(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_607(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_608) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_608(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_608(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_609) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_609(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_609(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_610) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_610(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_610(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_611) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_611(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_611(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_612) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_612(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_612(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_613) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_613(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_613(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_614) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_614(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_614(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_615) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_615(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_615(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_616) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_616(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_616(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_617) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_617(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_617(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_618) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_618(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_618(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_619) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_619(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_619(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_620) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_620(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_620(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_621) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_621(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_621(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_622) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_622(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_622(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_623) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_623(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_623(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_624) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_624(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_624(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_625) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_625(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_625(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_626) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_626(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_626(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_627) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_627(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_627(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_628) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_628(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_628(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_629) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_629(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_629(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_630) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_630(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_630(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_631) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_631(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_631(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_632) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_632(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_632(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_633) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_633(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_633(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_634) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_634(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_634(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_635) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_635(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_635(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_636) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_636(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_636(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_637) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_637(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_637(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_638) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_638(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_638(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_639) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_639(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_639(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_640) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_640(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_640(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_641) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_641(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_641(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_642) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_642(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_642(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_643) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_643(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_643(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_644) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_644(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_644(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_645) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_645(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_645(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_646) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_646(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_646(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_647) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_647(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_647(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_648) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_648(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_648(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_649) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_649(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_649(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_650) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_650(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_650(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_651) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_651(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_651(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_652) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_652(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_652(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_653) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_653(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_653(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_654) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_654(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_654(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_655) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_655(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_655(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_656) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_656(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_656(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_657) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_657(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_657(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_658) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_658(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_658(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_659) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_659(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_659(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_660) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_660(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_660(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_661) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_661(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_661(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_662) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_662(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_662(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_663) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_663(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_663(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_664) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_664(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_664(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_665) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_665(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_665(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_666) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_666(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_666(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_667) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_667(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_667(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_668) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_668(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_668(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_669) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_669(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_669(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_670) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_670(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_670(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_671) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_671(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_671(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_672) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_672(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_672(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_673) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_673(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_673(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_674) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_674(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_674(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_675) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_675(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_675(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_676) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_676(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_676(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_677) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_677(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_677(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_678) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_678(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_678(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_679) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_679(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_679(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_680) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_680(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_680(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_681) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_681(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_681(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_682) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_682(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_682(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_683) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_683(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_683(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_684) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_684(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_684(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_685) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_685(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_685(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_686) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_686(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_686(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_687) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_687(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_687(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_688) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_688(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_688(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_689) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_689(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_689(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_690) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_690(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_690(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_691) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_691(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_691(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_692) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_692(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_692(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_693) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_693(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_693(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_694) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_694(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_694(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_695) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_695(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_695(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_696) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_696(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_696(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_697) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_697(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_697(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_698) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_698(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_698(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_699) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_699(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_699(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_700) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_700(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_700(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_701) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_701(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_701(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_702) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_702(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_702(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_703) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_703(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_703(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_704) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_704(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_704(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_705) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_705(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_705(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_706) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_706(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_706(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_707) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_707(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_707(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_708) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_708(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_708(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_709) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_709(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_709(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_710) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_710(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_710(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_711) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_711(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_711(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_712) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_712(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_712(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_713) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_713(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_713(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_714) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_714(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_714(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_715) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_715(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_715(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_716) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_716(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_716(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_717) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_717(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_717(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_718) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_718(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_718(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_719) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_719(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_719(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_720) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_720(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_720(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_721) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_721(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_721(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_722) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_722(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_722(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_723) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_723(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_723(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_724) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_724(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_724(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_725) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_725(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_725(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_726) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_726(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_726(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_727) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_727(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_727(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_728) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_728(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_728(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_729) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_729(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_729(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_730) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_730(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_730(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_731) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_731(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_731(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_732) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_732(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_732(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_733) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_733(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_733(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_734) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_734(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_734(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_735) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_735(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_735(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_736) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_736(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_736(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_737) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_737(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_737(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_738) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_738(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_738(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_739) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_739(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_739(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_740) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_740(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_740(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_741) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_741(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_741(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_742) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_742(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_742(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_743) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_743(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_743(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_744) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_744(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_744(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_745) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_745(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_745(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_746) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_746(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_746(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_747) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_747(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_747(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_748) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_748(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_748(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_749) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_749(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_749(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_750) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_750(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_750(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_751) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_751(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_751(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_752) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_752(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_752(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_753) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_753(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_753(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_754) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_754(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_754(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_755) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_755(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_755(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_756) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_756(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_756(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_757) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_757(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_757(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_758) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_758(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_758(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_759) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_759(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_759(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_760) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_760(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_760(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_761) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_761(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_761(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_762) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_762(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_762(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_763) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_763(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_763(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_764) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_764(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_764(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_765) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_765(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_765(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_766) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_766(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_766(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_767) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_767(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_767(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_768) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_768(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_768(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_769) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_769(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_769(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_770) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_770(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_770(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_771) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_771(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_771(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_772) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_772(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_772(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_773) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_773(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_773(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_774) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_774(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_774(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_775) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_775(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_775(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_776) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_776(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_776(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_777) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_777(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_777(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_778) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_778(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_778(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_779) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_779(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_779(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_780) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_780(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_780(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_781) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_781(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_781(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_782) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_782(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_782(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_783) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_783(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_783(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_784) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_784(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_784(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_785) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_785(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_785(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_786) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_786(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_786(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_787) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_787(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_787(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_788) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_788(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_788(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_789) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_789(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_789(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_790) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_790(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_790(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_791) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_791(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_791(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_792) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_792(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_792(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_793) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_793(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_793(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_794) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_794(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_794(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_795) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_795(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_795(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_796) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_796(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_796(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_797) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_797(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_797(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_798) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_798(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_798(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_799) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_799(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_799(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_800) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_800(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_800(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_801) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_801(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_801(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_802) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_802(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_802(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_803) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_803(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_803(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_804) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_804(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_804(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_805) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_805(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_805(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_806) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_806(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_806(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_807) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_807(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_807(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_808) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_808(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_808(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_809) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_809(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_809(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_810) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_810(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_810(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_811) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_811(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_811(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_812) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_812(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_812(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_813) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_813(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_813(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_814) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_814(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_814(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_815) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_815(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_815(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_816) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_816(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_816(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_817) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_817(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_817(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_818) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_818(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_818(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_819) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_819(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_819(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_820) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_820(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_820(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_821) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_821(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_821(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_822) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_822(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_822(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_823) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_823(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_823(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_824) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_824(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_824(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_825) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_825(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_825(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_826) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_826(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_826(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_827) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_827(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_827(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_828) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_828(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_828(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_829) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_829(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_829(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_830) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_830(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_830(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_831) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_831(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_831(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_832) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_832(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_832(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_833) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_833(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_833(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_834) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_834(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_834(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_835) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_835(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_835(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_836) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_836(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_836(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_837) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_837(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_837(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_838) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_838(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_838(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_839) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_839(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_839(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_840) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_840(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_840(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_841) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_841(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_841(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_842) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_842(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_842(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_843) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_843(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_843(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_844) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_844(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_844(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_845) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_845(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_845(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_846) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_846(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_846(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_847) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_847(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_847(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_848) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_848(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_848(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_849) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_849(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_849(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_850) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_850(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_850(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_851) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_851(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_851(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_852) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_852(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_852(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_853) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_853(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_853(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_854) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_854(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_854(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_855) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_855(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_855(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_856) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_856(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_856(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_857) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_857(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_857(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_858) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_858(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_858(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_859) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_859(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_859(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_860) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_860(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_860(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_861) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_861(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_861(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_862) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_862(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_862(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_863) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_863(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_863(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_864) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_864(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_864(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_865) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_865(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_865(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_866) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_866(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_866(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_867) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_867(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_867(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_868) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_868(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_868(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_869) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_869(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_869(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_870) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_870(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_870(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_871) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_871(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_871(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_872) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_872(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_872(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_873) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_873(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_873(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_874) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_874(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_874(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_875) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_875(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_875(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_876) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_876(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_876(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_877) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_877(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_877(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_878) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_878(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_878(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_879) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_879(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_879(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_880) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_880(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_880(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_881) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_881(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_881(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_882) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_882(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_882(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_883) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_883(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_883(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_884) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_884(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_884(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_885) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_885(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_885(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_886) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_886(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_886(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_887) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_887(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_887(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_888) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_888(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_888(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_889) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_889(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_889(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_890) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_890(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_890(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_891) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_891(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_891(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_892) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_892(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_892(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_893) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_893(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_893(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_894) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_894(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_894(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_895) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_895(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_895(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_896) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_896(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_896(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_897) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_897(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_897(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_898) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_898(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_898(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_899) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_899(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_899(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_900) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_900(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_900(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_901) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_901(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_901(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_902) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_902(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_902(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_903) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_903(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_903(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_904) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_904(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_904(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_905) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_905(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_905(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_906) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_906(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_906(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_907) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_907(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_907(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_908) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_908(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_908(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_909) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_909(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_909(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_910) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_910(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_910(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_911) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_911(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_911(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_912) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_912(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_912(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_913) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_913(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_913(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_914) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_914(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_914(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_915) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_915(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_915(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_916) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_916(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_916(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_917) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_917(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_917(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_918) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_918(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_918(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_919) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_919(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_919(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_920) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_920(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_920(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_921) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_921(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_921(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_922) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_922(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_922(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_923) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_923(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_923(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_924) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_924(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_924(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_925) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_925(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_925(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_926) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_926(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_926(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_927) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_927(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_927(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_928) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_928(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_928(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_929) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_929(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_929(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_930) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_930(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_930(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_931) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_931(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_931(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_932) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_932(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_932(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_933) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_933(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_933(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_934) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_934(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_934(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_935) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_935(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_935(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_936) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_936(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_936(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_937) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_937(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_937(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_938) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_938(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_938(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_939) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_939(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_939(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_940) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_940(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_940(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_941) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_941(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_941(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_942) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_942(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_942(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_943) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_943(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_943(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_944) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_944(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_944(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_945) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_945(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_945(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_946) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_946(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_946(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_947) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_947(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_947(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_948) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_948(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_948(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_949) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_949(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_949(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_950) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_950(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_950(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_951) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_951(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_951(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_952) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_952(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_952(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_953) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_953(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_953(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_954) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_954(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_954(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_955) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_955(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_955(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_956) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_956(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_956(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_957) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_957(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_957(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_958) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_958(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_958(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_959) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_959(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_959(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_960) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_960(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_960(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_961) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_961(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_961(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_962) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_962(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_962(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_963) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_963(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_963(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_964) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_964(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_964(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_965) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_965(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_965(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_966) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_966(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_966(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_967) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_967(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_967(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_968) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_968(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_968(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_969) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_969(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_969(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_970) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_970(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_970(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_971) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_971(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_971(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_972) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_972(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_972(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_973) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_973(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_973(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_974) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_974(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_974(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_975) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_975(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_975(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_976) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_976(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_976(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_977) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_977(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_977(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_978) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_978(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_978(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_979) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_979(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_979(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_980) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_980(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_980(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_981) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_981(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_981(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_982) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_982(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_982(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_983) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_983(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_983(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_984) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_984(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_984(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_985) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_985(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_985(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_986) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_986(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_986(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_987) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_987(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_987(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_988) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_988(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_988(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_989) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_989(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_989(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_990) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_990(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_990(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_991) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_991(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_991(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_992) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_992(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_992(FUNC) +#endif + +#if 
defined(EIGEN_TEST_PART_993) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_993(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_993(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_994) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_994(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_994(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_995) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_995(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_995(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_996) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_996(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_996(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_997) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_997(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_997(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_998) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_998(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_998(FUNC) +#endif + +#if defined(EIGEN_TEST_PART_999) || defined(EIGEN_TEST_PART_ALL) +#define CALL_SUBTEST_999(FUNC) CALL_SUBTEST(FUNC) +#else +#define CALL_SUBTEST_999(FUNC) +#endif + diff --git a/test/spqr_support.cpp b/test/spqr_support.cpp index 81e63b6a5..79c2c12fc 100644 --- a/test/spqr_support.cpp +++ b/test/spqr_support.cpp @@ -57,7 +57,7 @@ template<typename Scalar> void test_spqr_scalar() refX = dA.colPivHouseholderQr().solve(b); VERIFY(x.isApprox(refX,test_precision<Scalar>())); } -void test_spqr_support() +EIGEN_DECLARE_TEST(spqr_support) { CALL_SUBTEST_1(test_spqr_scalar<double>()); CALL_SUBTEST_2(test_spqr_scalar<std::complex<double> >()); diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp index c3eb5ff31..cb8a80c18 100644 --- a/test/stable_norm.cpp +++ b/test/stable_norm.cpp @@ -21,7 +21,6 @@ template<typename MatrixType> void stable_norm(const MatrixType& m) */ using std::sqrt; using std::abs; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef 
typename NumTraits<Scalar>::Real RealScalar; @@ -65,6 +64,8 @@ template<typename MatrixType> void stable_norm(const MatrixType& m) factor = internal::random<Scalar>(); Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4)); + Scalar one(1); + MatrixType vzero = MatrixType::Zero(rows, cols), vrand = MatrixType::Random(rows, cols), vbig(rows, cols), @@ -78,6 +79,14 @@ template<typename MatrixType> void stable_norm(const MatrixType& m) VERIFY_IS_APPROX(vrand.blueNorm(), vrand.norm()); VERIFY_IS_APPROX(vrand.hypotNorm(), vrand.norm()); + // test with expressions as input + VERIFY_IS_APPROX((one*vrand).stableNorm(), vrand.norm()); + VERIFY_IS_APPROX((one*vrand).blueNorm(), vrand.norm()); + VERIFY_IS_APPROX((one*vrand).hypotNorm(), vrand.norm()); + VERIFY_IS_APPROX((one*vrand+one*vrand-one*vrand).stableNorm(), vrand.norm()); + VERIFY_IS_APPROX((one*vrand+one*vrand-one*vrand).blueNorm(), vrand.norm()); + VERIFY_IS_APPROX((one*vrand+one*vrand-one*vrand).hypotNorm(), vrand.norm()); + RealScalar size = static_cast<RealScalar>(m.size()); // test numext::isfinite @@ -161,7 +170,13 @@ template<typename MatrixType> void stable_norm(const MatrixType& m) VERIFY(!(numext::isfinite)(v.norm())); VERIFY((numext::isnan)(v.norm())); VERIFY(!(numext::isfinite)(v.stableNorm())); VERIFY((numext::isnan)(v.stableNorm())); VERIFY(!(numext::isfinite)(v.blueNorm())); VERIFY((numext::isnan)(v.blueNorm())); - VERIFY(!(numext::isfinite)(v.hypotNorm())); VERIFY((numext::isnan)(v.hypotNorm())); + if (i2 != i || j2 != j) { + // hypot propagates inf over NaN. + VERIFY(!(numext::isfinite)(v.hypotNorm())); VERIFY((numext::isinf)(v.hypotNorm())); + } else { + // inf is overwritten by NaN, expect norm to be NaN. 
+ VERIFY(!(numext::isfinite)(v.hypotNorm())); VERIFY((numext::isnan)(v.hypotNorm())); + } } // stableNormalize[d] @@ -180,13 +195,51 @@ template<typename MatrixType> void stable_norm(const MatrixType& m) } } -void test_stable_norm() +template<typename Scalar> +void test_hypot() +{ + typedef typename NumTraits<Scalar>::Real RealScalar; + Scalar factor = internal::random<Scalar>(); + while(numext::abs2(factor)<RealScalar(1e-4)) + factor = internal::random<Scalar>(); + Scalar big = factor * ((std::numeric_limits<RealScalar>::max)() * RealScalar(1e-4)); + + factor = internal::random<Scalar>(); + while(numext::abs2(factor)<RealScalar(1e-4)) + factor = internal::random<Scalar>(); + Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4)); + + Scalar one (1), + zero (0), + sqrt2 (std::sqrt(2)), + nan (std::numeric_limits<RealScalar>::quiet_NaN()); + + Scalar a = internal::random<Scalar>(-1,1); + Scalar b = internal::random<Scalar>(-1,1); + VERIFY_IS_APPROX(numext::hypot(a,b),std::sqrt(numext::abs2(a)+numext::abs2(b))); + VERIFY_IS_EQUAL(numext::hypot(zero,zero), zero); + VERIFY_IS_APPROX(numext::hypot(one, one), sqrt2); + VERIFY_IS_APPROX(numext::hypot(big,big), sqrt2*numext::abs(big)); + VERIFY_IS_APPROX(numext::hypot(small,small), sqrt2*numext::abs(small)); + VERIFY_IS_APPROX(numext::hypot(small,big), numext::abs(big)); + VERIFY((numext::isnan)(numext::hypot(nan,a))); + VERIFY((numext::isnan)(numext::hypot(a,nan))); +} + +EIGEN_DECLARE_TEST(stable_norm) { for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_3( test_hypot<double>() ); + CALL_SUBTEST_4( test_hypot<float>() ); + CALL_SUBTEST_5( test_hypot<std::complex<double> >() ); + CALL_SUBTEST_6( test_hypot<std::complex<float> >() ); + CALL_SUBTEST_1( stable_norm(Matrix<float, 1, 1>()) ); CALL_SUBTEST_2( stable_norm(Vector4d()) ); CALL_SUBTEST_3( stable_norm(VectorXd(internal::random<int>(10,2000))) ); + CALL_SUBTEST_3( stable_norm(MatrixXd(internal::random<int>(10,200), 
internal::random<int>(10,200))) ); CALL_SUBTEST_4( stable_norm(VectorXf(internal::random<int>(10,2000))) ); CALL_SUBTEST_5( stable_norm(VectorXcd(internal::random<int>(10,2000))) ); + CALL_SUBTEST_6( stable_norm(VectorXcf(internal::random<int>(10,2000))) ); } } diff --git a/test/stddeque.cpp b/test/stddeque.cpp index bb4b476f3..ea85ea968 100644 --- a/test/stddeque.cpp +++ b/test/stddeque.cpp @@ -15,12 +15,10 @@ template<typename MatrixType> void check_stddeque_matrix(const MatrixType& m) { - typedef typename MatrixType::Index Index; - Index rows = m.rows(); Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); - std::deque<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType(rows,cols)), w(20, y); + std::deque<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType::Zero(rows,cols)), w(20, y); v.front() = x; w.front() = w.back(); VERIFY_IS_APPROX(w.front(), w.back()); @@ -35,7 +33,7 @@ void check_stddeque_matrix(const MatrixType& m) ++wi; } - v.resize(21); + v.resize(21,MatrixType::Zero(rows,cols)); v.back() = x; VERIFY_IS_APPROX(v.back(), x); v.resize(22,y); @@ -48,8 +46,8 @@ template<typename TransformType> void check_stddeque_transform(const TransformType&) { typedef typename TransformType::MatrixType MatrixType; - TransformType x(MatrixType::Random()), y(MatrixType::Random()); - std::deque<TransformType,Eigen::aligned_allocator<TransformType> > v(10), w(20, y); + TransformType x(MatrixType::Random()), y(MatrixType::Random()), ti=TransformType::Identity(); + std::deque<TransformType,Eigen::aligned_allocator<TransformType> > v(10,ti), w(20, y); v.front() = x; w.front() = w.back(); VERIFY_IS_APPROX(w.front(), w.back()); @@ -64,7 +62,7 @@ void check_stddeque_transform(const TransformType&) ++wi; } - v.resize(21); + v.resize(21,ti); v.back() = x; VERIFY_IS_APPROX(v.back(), x); v.resize(22,y); @@ -77,8 +75,8 @@ template<typename QuaternionType> void check_stddeque_quaternion(const 
QuaternionType&) { typedef typename QuaternionType::Coefficients Coefficients; - QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); - std::deque<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10), w(20, y); + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity(); + std::deque<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10,qi), w(20, y); v.front() = x; w.front() = w.back(); VERIFY_IS_APPROX(w.front(), w.back()); @@ -93,7 +91,7 @@ void check_stddeque_quaternion(const QuaternionType&) ++wi; } - v.resize(21); + v.resize(21,qi); v.back() = x; VERIFY_IS_APPROX(v.back(), x); v.resize(22,y); @@ -102,7 +100,7 @@ void check_stddeque_quaternion(const QuaternionType&) VERIFY_IS_APPROX(v.back(), x); } -void test_stddeque() +EIGEN_DECLARE_TEST(stddeque) { // some non vectorizable fixed sizes CALL_SUBTEST_1(check_stddeque_matrix(Vector2f())); diff --git a/test/stddeque_overload.cpp b/test/stddeque_overload.cpp index 4da618bbf..0f59f0695 100644 --- a/test/stddeque_overload.cpp +++ b/test/stddeque_overload.cpp @@ -28,10 +28,10 @@ EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Quaterniond) template<typename MatrixType> void check_stddeque_matrix(const MatrixType& m) { - typename MatrixType::Index rows = m.rows(); - typename MatrixType::Index cols = m.cols(); + Index rows = m.rows(); + Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); - std::deque<MatrixType> v(10, MatrixType(rows,cols)), w(20, y); + std::deque<MatrixType> v(10, MatrixType::Zero(rows,cols)), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -64,8 +64,8 @@ template<typename TransformType> void check_stddeque_transform(const TransformType&) { typedef typename TransformType::MatrixType MatrixType; - TransformType x(MatrixType::Random()), y(MatrixType::Random()); - std::deque<TransformType> v(10), w(20, y); + TransformType x(MatrixType::Random()), 
y(MatrixType::Random()), ti=TransformType::Identity(); + std::deque<TransformType> v(10,ti), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -75,7 +75,7 @@ void check_stddeque_transform(const TransformType&) VERIFY_IS_APPROX(w[i], v[i]); } - v.resize(21); + v.resize(21,ti); v[20] = x; VERIFY_IS_APPROX(v[20], x); v.resize(22,y); @@ -98,8 +98,8 @@ template<typename QuaternionType> void check_stddeque_quaternion(const QuaternionType&) { typedef typename QuaternionType::Coefficients Coefficients; - QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); - std::deque<QuaternionType> v(10), w(20, y); + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity(); + std::deque<QuaternionType> v(10,qi), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -109,7 +109,7 @@ void check_stddeque_quaternion(const QuaternionType&) VERIFY_IS_APPROX(w[i], v[i]); } - v.resize(21); + v.resize(21,qi); v[20] = x; VERIFY_IS_APPROX(v[20], x); v.resize(22,y); @@ -128,7 +128,7 @@ void check_stddeque_quaternion(const QuaternionType&) } } -void test_stddeque_overload() +EIGEN_DECLARE_TEST(stddeque_overload) { // some non vectorizable fixed sizes CALL_SUBTEST_1(check_stddeque_matrix(Vector2f())); diff --git a/test/stdlist.cpp b/test/stdlist.cpp index 17cce779a..1af9e6ecb 100644 --- a/test/stdlist.cpp +++ b/test/stdlist.cpp @@ -15,12 +15,10 @@ template<typename MatrixType> void check_stdlist_matrix(const MatrixType& m) { - typedef typename MatrixType::Index Index; - Index rows = m.rows(); Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); - std::list<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType(rows,cols)), w(20, y); + std::list<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType::Zero(rows,cols)), w(20, y); v.front() = x; w.front() = w.back(); VERIFY_IS_APPROX(w.front(), w.back()); @@ -35,7 +33,7 @@ void 
check_stdlist_matrix(const MatrixType& m) ++wi; } - v.resize(21); + v.resize(21, MatrixType::Zero(rows,cols)); v.back() = x; VERIFY_IS_APPROX(v.back(), x); v.resize(22,y); @@ -48,8 +46,8 @@ template<typename TransformType> void check_stdlist_transform(const TransformType&) { typedef typename TransformType::MatrixType MatrixType; - TransformType x(MatrixType::Random()), y(MatrixType::Random()); - std::list<TransformType,Eigen::aligned_allocator<TransformType> > v(10), w(20, y); + TransformType x(MatrixType::Random()), y(MatrixType::Random()), ti=TransformType::Identity(); + std::list<TransformType,Eigen::aligned_allocator<TransformType> > v(10,ti), w(20, y); v.front() = x; w.front() = w.back(); VERIFY_IS_APPROX(w.front(), w.back()); @@ -64,7 +62,7 @@ void check_stdlist_transform(const TransformType&) ++wi; } - v.resize(21); + v.resize(21, ti); v.back() = x; VERIFY_IS_APPROX(v.back(), x); v.resize(22,y); @@ -77,8 +75,8 @@ template<typename QuaternionType> void check_stdlist_quaternion(const QuaternionType&) { typedef typename QuaternionType::Coefficients Coefficients; - QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); - std::list<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10), w(20, y); + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity(); + std::list<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10,qi), w(20, y); v.front() = x; w.front() = w.back(); VERIFY_IS_APPROX(w.front(), w.back()); @@ -93,7 +91,7 @@ void check_stdlist_quaternion(const QuaternionType&) ++wi; } - v.resize(21); + v.resize(21,qi); v.back() = x; VERIFY_IS_APPROX(v.back(), x); v.resize(22,y); @@ -102,7 +100,7 @@ void check_stdlist_quaternion(const QuaternionType&) VERIFY_IS_APPROX(v.back(), x); } -void test_stdlist() +EIGEN_DECLARE_TEST(stdlist) { // some non vectorizable fixed sizes CALL_SUBTEST_1(check_stdlist_matrix(Vector2f())); diff --git a/test/stdlist_overload.cpp b/test/stdlist_overload.cpp 
index bb910bd43..a78516e24 100644 --- a/test/stdlist_overload.cpp +++ b/test/stdlist_overload.cpp @@ -44,10 +44,10 @@ void set(Container & c, Position position, const Value & value) template<typename MatrixType> void check_stdlist_matrix(const MatrixType& m) { - typename MatrixType::Index rows = m.rows(); - typename MatrixType::Index cols = m.cols(); + Index rows = m.rows(); + Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); - std::list<MatrixType> v(10, MatrixType(rows,cols)), w(20, y); + std::list<MatrixType> v(10, MatrixType::Zero(rows,cols)), w(20, y); typename std::list<MatrixType>::iterator itv = get(v, 5); typename std::list<MatrixType>::iterator itw = get(w, 6); *itv = x; @@ -86,8 +86,8 @@ template<typename TransformType> void check_stdlist_transform(const TransformType&) { typedef typename TransformType::MatrixType MatrixType; - TransformType x(MatrixType::Random()), y(MatrixType::Random()); - std::list<TransformType> v(10), w(20, y); + TransformType x(MatrixType::Random()), y(MatrixType::Random()), ti=TransformType::Identity(); + std::list<TransformType> v(10,ti), w(20, y); typename std::list<TransformType>::iterator itv = get(v, 5); typename std::list<TransformType>::iterator itw = get(w, 6); *itv = x; @@ -103,7 +103,7 @@ void check_stdlist_transform(const TransformType&) ++itw; } - v.resize(21); + v.resize(21, ti); set(v, 20, x); VERIFY_IS_APPROX(*get(v, 20), x); v.resize(22,y); @@ -126,8 +126,8 @@ template<typename QuaternionType> void check_stdlist_quaternion(const QuaternionType&) { typedef typename QuaternionType::Coefficients Coefficients; - QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); - std::list<QuaternionType> v(10), w(20, y); + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity(); + std::list<QuaternionType> v(10,qi), w(20, y); typename std::list<QuaternionType>::iterator itv = get(v, 5); typename 
std::list<QuaternionType>::iterator itw = get(w, 6); *itv = x; @@ -143,7 +143,7 @@ void check_stdlist_quaternion(const QuaternionType&) ++itw; } - v.resize(21); + v.resize(21,qi); set(v, 20, x); VERIFY_IS_APPROX(*get(v, 20), x); v.resize(22,y); @@ -162,7 +162,7 @@ void check_stdlist_quaternion(const QuaternionType&) } } -void test_stdlist_overload() +EIGEN_DECLARE_TEST(stdlist_overload) { // some non vectorizable fixed sizes CALL_SUBTEST_1(check_stdlist_matrix(Vector2f())); diff --git a/test/stdvector.cpp b/test/stdvector.cpp index 50cb3341d..18de240c6 100644 --- a/test/stdvector.cpp +++ b/test/stdvector.cpp @@ -14,10 +14,10 @@ template<typename MatrixType> void check_stdvector_matrix(const MatrixType& m) { - typename MatrixType::Index rows = m.rows(); - typename MatrixType::Index cols = m.cols(); + Index rows = m.rows(); + Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); - std::vector<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType(rows,cols)), w(20, y); + std::vector<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType::Zero(rows,cols)), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -86,8 +86,8 @@ template<typename QuaternionType> void check_stdvector_quaternion(const QuaternionType&) { typedef typename QuaternionType::Coefficients Coefficients; - QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); - std::vector<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10), w(20, y); + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity(); + std::vector<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10,qi), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -117,7 +117,17 @@ void check_stdvector_quaternion(const QuaternionType&) } } -void test_stdvector() +// the code below triggered an invalid warning with gcc >= 7 +// eigen/Eigen/src/Core/util/Memory.h:189:12: 
warning: argument 1 value '18446744073709551612' exceeds maximum object size 9223372036854775807 +// This has been reported to gcc there: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87544 +void std_vector_gcc_warning() +{ + typedef Eigen::Vector3f T; + std::vector<T, Eigen::aligned_allocator<T> > v; + v.push_back(T()); +} + +EIGEN_DECLARE_TEST(stdvector) { // some non vectorizable fixed sizes CALL_SUBTEST_1(check_stdvector_matrix(Vector2f())); diff --git a/test/stdvector_overload.cpp b/test/stdvector_overload.cpp index 959665954..da04f8a84 100644 --- a/test/stdvector_overload.cpp +++ b/test/stdvector_overload.cpp @@ -28,10 +28,10 @@ EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Quaterniond) template<typename MatrixType> void check_stdvector_matrix(const MatrixType& m) { - typename MatrixType::Index rows = m.rows(); - typename MatrixType::Index cols = m.cols(); + Index rows = m.rows(); + Index cols = m.cols(); MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); - std::vector<MatrixType> v(10, MatrixType(rows,cols)), w(20, y); + std::vector<MatrixType> v(10, MatrixType::Zero(rows,cols)), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -100,8 +100,8 @@ template<typename QuaternionType> void check_stdvector_quaternion(const QuaternionType&) { typedef typename QuaternionType::Coefficients Coefficients; - QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); - std::vector<QuaternionType> v(10), w(20, y); + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity(); + std::vector<QuaternionType> v(10,qi), w(20, y); v[5] = x; w[6] = v[5]; VERIFY_IS_APPROX(w[6], v[5]); @@ -131,7 +131,7 @@ void check_stdvector_quaternion(const QuaternionType&) } } -void test_stdvector_overload() +EIGEN_DECLARE_TEST(stdvector_overload) { // some non vectorizable fixed sizes CALL_SUBTEST_1(check_stdvector_matrix(Vector2f())); diff --git a/test/stl_iterators.cpp b/test/stl_iterators.cpp new file mode 
100644 index 000000000..72bbf8250 --- /dev/null +++ b/test/stl_iterators.cpp @@ -0,0 +1,562 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018-2019 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include <iterator> +#include <numeric> + +template< class Iterator > +std::reverse_iterator<Iterator> +make_reverse_iterator( Iterator i ) +{ + return std::reverse_iterator<Iterator>(i); +} + +#if !EIGEN_HAS_CXX11 +template<class ForwardIt> +ForwardIt is_sorted_until(ForwardIt firstIt, ForwardIt lastIt) +{ + if (firstIt != lastIt) { + ForwardIt next = firstIt; + while (++next != lastIt) { + if (*next < *firstIt) + return next; + firstIt = next; + } + } + return lastIt; +} +template<class ForwardIt> +bool is_sorted(ForwardIt firstIt, ForwardIt lastIt) +{ + return ::is_sorted_until(firstIt, lastIt) == lastIt; +} +#else +using std::is_sorted; +#endif + +template<typename XprType> +bool is_pointer_based_stl_iterator(const internal::pointer_based_stl_iterator<XprType> &) { return true; } + +template<typename XprType> +bool is_generic_randaccess_stl_iterator(const internal::generic_randaccess_stl_iterator<XprType> &) { return true; } + +template<typename Iter> +bool is_default_constructible_and_assignable(const Iter& it) +{ +#if EIGEN_HAS_CXX11 + VERIFY(std::is_default_constructible<Iter>::value); + VERIFY(std::is_nothrow_default_constructible<Iter>::value); +#endif + Iter it2; + it2 = it; + return (it==it2); +} + +template<typename Xpr> +void check_begin_end_for_loop(Xpr xpr) +{ + const Xpr& cxpr(xpr); + Index i = 0; + + i = 0; + for(typename Xpr::iterator it = xpr.begin(); it!=xpr.end(); ++it) { VERIFY_IS_EQUAL(*it,xpr[i++]); } + + i = 0; + for(typename Xpr::const_iterator it = 
xpr.cbegin(); it!=xpr.cend(); ++it) { VERIFY_IS_EQUAL(*it,xpr[i++]); } + + i = 0; + for(typename Xpr::const_iterator it = cxpr.begin(); it!=cxpr.end(); ++it) { VERIFY_IS_EQUAL(*it,xpr[i++]); } + + i = 0; + for(typename Xpr::const_iterator it = xpr.begin(); it!=xpr.end(); ++it) { VERIFY_IS_EQUAL(*it,xpr[i++]); } + + { + // simple API check + typename Xpr::const_iterator cit = xpr.begin(); + cit = xpr.cbegin(); + + #if EIGEN_HAS_CXX11 + auto tmp1 = xpr.begin(); + VERIFY(tmp1==xpr.begin()); + auto tmp2 = xpr.cbegin(); + VERIFY(tmp2==xpr.cbegin()); + #endif + } + + VERIFY( xpr.end() -xpr.begin() == xpr.size() ); + VERIFY( xpr.cend()-xpr.begin() == xpr.size() ); + VERIFY( xpr.end() -xpr.cbegin() == xpr.size() ); + VERIFY( xpr.cend()-xpr.cbegin() == xpr.size() ); + + if(xpr.size()>0) { + VERIFY(xpr.begin() != xpr.end()); + VERIFY(xpr.begin() < xpr.end()); + VERIFY(xpr.begin() <= xpr.end()); + VERIFY(!(xpr.begin() == xpr.end())); + VERIFY(!(xpr.begin() > xpr.end())); + VERIFY(!(xpr.begin() >= xpr.end())); + + VERIFY(xpr.cbegin() != xpr.end()); + VERIFY(xpr.cbegin() < xpr.end()); + VERIFY(xpr.cbegin() <= xpr.end()); + VERIFY(!(xpr.cbegin() == xpr.end())); + VERIFY(!(xpr.cbegin() > xpr.end())); + VERIFY(!(xpr.cbegin() >= xpr.end())); + + VERIFY(xpr.begin() != xpr.cend()); + VERIFY(xpr.begin() < xpr.cend()); + VERIFY(xpr.begin() <= xpr.cend()); + VERIFY(!(xpr.begin() == xpr.cend())); + VERIFY(!(xpr.begin() > xpr.cend())); + VERIFY(!(xpr.begin() >= xpr.cend())); + } +} + +template<typename Scalar, int Rows, int Cols> +void test_stl_iterators(int rows=Rows, int cols=Cols) +{ + typedef Matrix<Scalar,Rows,1> VectorType; + #if EIGEN_HAS_CXX11 + typedef Matrix<Scalar,1,Cols> RowVectorType; + #endif + typedef Matrix<Scalar,Rows,Cols,ColMajor> ColMatrixType; + typedef Matrix<Scalar,Rows,Cols,RowMajor> RowMatrixType; + VectorType v = VectorType::Random(rows); + const VectorType& cv(v); + ColMatrixType A = ColMatrixType::Random(rows,cols); + const ColMatrixType& cA(A); + RowMatrixType 
B = RowMatrixType::Random(rows,cols); + + Index i, j; + + // Verify that iterators are default constructible (See bug #1900) + { + VERIFY( is_default_constructible_and_assignable(v.begin())); + VERIFY( is_default_constructible_and_assignable(v.end())); + VERIFY( is_default_constructible_and_assignable(cv.begin())); + VERIFY( is_default_constructible_and_assignable(cv.end())); + + VERIFY( is_default_constructible_and_assignable(A.row(0).begin())); + VERIFY( is_default_constructible_and_assignable(A.row(0).end())); + VERIFY( is_default_constructible_and_assignable(cA.row(0).begin())); + VERIFY( is_default_constructible_and_assignable(cA.row(0).end())); + + VERIFY( is_default_constructible_and_assignable(B.row(0).begin())); + VERIFY( is_default_constructible_and_assignable(B.row(0).end())); + } + + // Check we got a fast pointer-based iterator when expected + { + VERIFY( is_pointer_based_stl_iterator(v.begin()) ); + VERIFY( is_pointer_based_stl_iterator(v.end()) ); + VERIFY( is_pointer_based_stl_iterator(cv.begin()) ); + VERIFY( is_pointer_based_stl_iterator(cv.end()) ); + + j = internal::random<Index>(0,A.cols()-1); + VERIFY( is_pointer_based_stl_iterator(A.col(j).begin()) ); + VERIFY( is_pointer_based_stl_iterator(A.col(j).end()) ); + VERIFY( is_pointer_based_stl_iterator(cA.col(j).begin()) ); + VERIFY( is_pointer_based_stl_iterator(cA.col(j).end()) ); + + i = internal::random<Index>(0,A.rows()-1); + VERIFY( is_pointer_based_stl_iterator(A.row(i).begin()) ); + VERIFY( is_pointer_based_stl_iterator(A.row(i).end()) ); + VERIFY( is_pointer_based_stl_iterator(cA.row(i).begin()) ); + VERIFY( is_pointer_based_stl_iterator(cA.row(i).end()) ); + + VERIFY( is_pointer_based_stl_iterator(A.reshaped().begin()) ); + VERIFY( is_pointer_based_stl_iterator(A.reshaped().end()) ); + VERIFY( is_pointer_based_stl_iterator(cA.reshaped().begin()) ); + VERIFY( is_pointer_based_stl_iterator(cA.reshaped().end()) ); + + VERIFY( is_pointer_based_stl_iterator(B.template 
reshaped<AutoOrder>().begin()) ); + VERIFY( is_pointer_based_stl_iterator(B.template reshaped<AutoOrder>().end()) ); + + VERIFY( is_generic_randaccess_stl_iterator(A.template reshaped<RowMajor>().begin()) ); + VERIFY( is_generic_randaccess_stl_iterator(A.template reshaped<RowMajor>().end()) ); + } + + { + check_begin_end_for_loop(v); + check_begin_end_for_loop(A.col(internal::random<Index>(0,A.cols()-1))); + check_begin_end_for_loop(A.row(internal::random<Index>(0,A.rows()-1))); + check_begin_end_for_loop(v+v); + } + +#if EIGEN_HAS_CXX11 + // check swappable + { + using std::swap; + // pointer-based + { + VectorType v_copy = v; + auto a = v.begin(); + auto b = v.end()-1; + swap(a,b); + VERIFY_IS_EQUAL(v,v_copy); + VERIFY_IS_EQUAL(*b,*v.begin()); + VERIFY_IS_EQUAL(*b,v(0)); + VERIFY_IS_EQUAL(*a,v.end()[-1]); + VERIFY_IS_EQUAL(*a,v(last)); + } + + // generic + { + RowMatrixType B_copy = B; + auto Br = B.reshaped(); + auto a = Br.begin(); + auto b = Br.end()-1; + swap(a,b); + VERIFY_IS_EQUAL(B,B_copy); + VERIFY_IS_EQUAL(*b,*Br.begin()); + VERIFY_IS_EQUAL(*b,Br(0)); + VERIFY_IS_EQUAL(*a,Br.end()[-1]); + VERIFY_IS_EQUAL(*a,Br(last)); + } + } + + // check non-const iterator with for-range loops + { + i = 0; + for(auto x : v) { VERIFY_IS_EQUAL(x,v[i++]); } + + j = internal::random<Index>(0,A.cols()-1); + i = 0; + for(auto x : A.col(j)) { VERIFY_IS_EQUAL(x,A(i++,j)); } + + i = 0; + for(auto x : (v+A.col(j))) { VERIFY_IS_APPROX(x,v(i)+A(i,j)); ++i; } + + j = 0; + i = internal::random<Index>(0,A.rows()-1); + for(auto x : A.row(i)) { VERIFY_IS_EQUAL(x,A(i,j++)); } + + i = 0; + for(auto x : A.reshaped()) { VERIFY_IS_EQUAL(x,A(i++)); } + } + + // same for const_iterator + { + i = 0; + for(auto x : cv) { VERIFY_IS_EQUAL(x,v[i++]); } + + i = 0; + for(auto x : cA.reshaped()) { VERIFY_IS_EQUAL(x,A(i++)); } + + j = 0; + i = internal::random<Index>(0,A.rows()-1); + for(auto x : cA.row(i)) { VERIFY_IS_EQUAL(x,A(i,j++)); } + } + + // check reshaped() on row-major + { + i = 0; + 
Matrix<Scalar,Dynamic,Dynamic,ColMajor> Bc = B; + for(auto x : B.reshaped()) { VERIFY_IS_EQUAL(x,Bc(i++)); } + } + + // check write access + { + VectorType w(v.size()); + i = 0; + for(auto& x : w) { x = v(i++); } + VERIFY_IS_EQUAL(v,w); + } + + // check for dangling pointers + { + // no dangling because pointer-based + { + j = internal::random<Index>(0,A.cols()-1); + auto it = A.col(j).begin(); + for(i=0;i<rows;++i) { + VERIFY_IS_EQUAL(it[i],A(i,j)); + } + } + + // no dangling because pointer-based + { + i = internal::random<Index>(0,A.rows()-1); + auto it = A.row(i).begin(); + for(j=0;j<cols;++j) { VERIFY_IS_EQUAL(it[j],A(i,j)); } + } + + { + j = internal::random<Index>(0,A.cols()-1); + // this would produce a dangling pointer: + // auto it = (A+2*A).col(j).begin(); + // we need to name the temporary expression: + auto tmp = (A+2*A).col(j); + auto it = tmp.begin(); + for(i=0;i<rows;++i) { + VERIFY_IS_APPROX(it[i],3*A(i,j)); + } + } + } + + { + // check basic for loop on vector-wise iterators + j=0; + for (auto it = A.colwise().cbegin(); it != A.colwise().cend(); ++it, ++j) { + VERIFY_IS_APPROX( it->coeff(0), A(0,j) ); + VERIFY_IS_APPROX( (*it).coeff(0), A(0,j) ); + } + j=0; + for (auto it = A.colwise().begin(); it != A.colwise().end(); ++it, ++j) { + (*it).coeffRef(0) = (*it).coeff(0); // compilation check + it->coeffRef(0) = it->coeff(0); // compilation check + VERIFY_IS_APPROX( it->coeff(0), A(0,j) ); + VERIFY_IS_APPROX( (*it).coeff(0), A(0,j) ); + } + + // check valuetype gives us a copy + j=0; + for (auto it = A.colwise().cbegin(); it != A.colwise().cend(); ++it, ++j) { + typename decltype(it)::value_type tmp = *it; + VERIFY_IS_NOT_EQUAL( tmp.data() , it->data() ); + VERIFY_IS_APPROX( tmp, A.col(j) ); + } + } + +#endif + + if(rows>=3) { + VERIFY_IS_EQUAL((v.begin()+rows/2)[1], v(rows/2+1)); + + VERIFY_IS_EQUAL((A.rowwise().begin()+rows/2)[1], A.row(rows/2+1)); + } + + if(cols>=3) { + VERIFY_IS_EQUAL((A.colwise().begin()+cols/2)[1], A.col(cols/2+1)); + } + + // 
check std::sort + { + // first check that is_sorted returns false when required + if(rows>=2) + { + v(1) = v(0)-Scalar(1); + #if EIGEN_HAS_CXX11 + VERIFY(!is_sorted(std::begin(v),std::end(v))); + #else + VERIFY(!is_sorted(v.cbegin(),v.cend())); + #endif + } + + // on a vector + { + std::sort(v.begin(),v.end()); + VERIFY(is_sorted(v.begin(),v.end())); + VERIFY(!::is_sorted(make_reverse_iterator(v.end()),make_reverse_iterator(v.begin()))); + } + + // on a column of a column-major matrix -> pointer-based iterator and default increment + { + j = internal::random<Index>(0,A.cols()-1); + // std::sort(begin(A.col(j)),end(A.col(j))); // does not compile because this returns const iterators + typename ColMatrixType::ColXpr Acol = A.col(j); + std::sort(Acol.begin(),Acol.end()); + VERIFY(is_sorted(Acol.cbegin(),Acol.cend())); + A.setRandom(); + + std::sort(A.col(j).begin(),A.col(j).end()); + VERIFY(is_sorted(A.col(j).cbegin(),A.col(j).cend())); + A.setRandom(); + } + + // on a row of a rowmajor matrix -> pointer-based iterator and runtime increment + { + i = internal::random<Index>(0,A.rows()-1); + typename ColMatrixType::RowXpr Arow = A.row(i); + VERIFY_IS_EQUAL( std::distance(Arow.begin(),Arow.end()), cols); + std::sort(Arow.begin(),Arow.end()); + VERIFY(is_sorted(Arow.cbegin(),Arow.cend())); + A.setRandom(); + + std::sort(A.row(i).begin(),A.row(i).end()); + VERIFY(is_sorted(A.row(i).cbegin(),A.row(i).cend())); + A.setRandom(); + } + + // with a generic iterator + { + Reshaped<RowMatrixType,RowMatrixType::SizeAtCompileTime,1> B1 = B.reshaped(); + std::sort(B1.begin(),B1.end()); + VERIFY(is_sorted(B1.cbegin(),B1.cend())); + B.setRandom(); + + // assertion because nested expressions are different + // std::sort(B.reshaped().begin(),B.reshaped().end()); + // VERIFY(is_sorted(B.reshaped().cbegin(),B.reshaped().cend())); + // B.setRandom(); + } + } + + // check with partial_sum + { + j = internal::random<Index>(0,A.cols()-1); + typename ColMatrixType::ColXpr Acol = A.col(j); + 
std::partial_sum(Acol.begin(), Acol.end(), v.begin()); + VERIFY_IS_APPROX(v(seq(1,last)), v(seq(0,last-1))+Acol(seq(1,last))); + + // inplace + std::partial_sum(Acol.begin(), Acol.end(), Acol.begin()); + VERIFY_IS_APPROX(v, Acol); + } + + // stress random access as required by std::nth_element + if(rows>=3) + { + v.setRandom(); + VectorType v1 = v; + std::sort(v1.begin(),v1.end()); + std::nth_element(v.begin(), v.begin()+rows/2, v.end()); + VERIFY_IS_APPROX(v1(rows/2), v(rows/2)); + + v.setRandom(); + v1 = v; + std::sort(v1.begin()+rows/2,v1.end()); + std::nth_element(v.begin()+rows/2, v.begin()+rows/4, v.end()); + VERIFY_IS_APPROX(v1(rows/4), v(rows/4)); + } + +#if EIGEN_HAS_CXX11 + // check rows/cols iterators with range-for loops + { + j = 0; + for(auto c : A.colwise()) { VERIFY_IS_APPROX(c.sum(), A.col(j).sum()); ++j; } + j = 0; + for(auto c : B.colwise()) { VERIFY_IS_APPROX(c.sum(), B.col(j).sum()); ++j; } + + j = 0; + for(auto c : B.colwise()) { + i = 0; + for(auto& x : c) { + VERIFY_IS_EQUAL(x, B(i,j)); + x = A(i,j); + ++i; + } + ++j; + } + VERIFY_IS_APPROX(A,B); + B.setRandom(); + + i = 0; + for(auto r : A.rowwise()) { VERIFY_IS_APPROX(r.sum(), A.row(i).sum()); ++i; } + i = 0; + for(auto r : B.rowwise()) { VERIFY_IS_APPROX(r.sum(), B.row(i).sum()); ++i; } + } + + + // check rows/cols iterators with STL algorithms + { + RowVectorType row = RowVectorType::Random(cols); + A.rowwise() = row; + VERIFY( std::all_of(A.rowwise().begin(), A.rowwise().end(), [&row](typename ColMatrixType::RowXpr x) { return internal::isApprox(x.squaredNorm(),row.squaredNorm()); }) ); + VERIFY( std::all_of(A.rowwise().rbegin(), A.rowwise().rend(), [&row](typename ColMatrixType::RowXpr x) { return internal::isApprox(x.squaredNorm(),row.squaredNorm()); }) ); + + VectorType col = VectorType::Random(rows); + A.colwise() = col; + VERIFY( std::all_of(A.colwise().begin(), A.colwise().end(), [&col](typename ColMatrixType::ColXpr x) { return 
internal::isApprox(x.squaredNorm(),col.squaredNorm()); }) ); + VERIFY( std::all_of(A.colwise().rbegin(), A.colwise().rend(), [&col](typename ColMatrixType::ColXpr x) { return internal::isApprox(x.squaredNorm(),col.squaredNorm()); }) ); + VERIFY( std::all_of(A.colwise().cbegin(), A.colwise().cend(), [&col](typename ColMatrixType::ConstColXpr x) { return internal::isApprox(x.squaredNorm(),col.squaredNorm()); }) ); + VERIFY( std::all_of(A.colwise().crbegin(), A.colwise().crend(), [&col](typename ColMatrixType::ConstColXpr x) { return internal::isApprox(x.squaredNorm(),col.squaredNorm()); }) ); + + i = internal::random<Index>(0,A.rows()-1); + A.setRandom(); + A.row(i).setZero(); + VERIFY_IS_EQUAL( std::find_if(A.rowwise().begin(), A.rowwise().end(), [](typename ColMatrixType::RowXpr x) { return x.squaredNorm() == Scalar(0); })-A.rowwise().begin(), i ); + VERIFY_IS_EQUAL( std::find_if(A.rowwise().rbegin(), A.rowwise().rend(), [](typename ColMatrixType::RowXpr x) { return x.squaredNorm() == Scalar(0); })-A.rowwise().rbegin(), (A.rows()-1) - i ); + + j = internal::random<Index>(0,A.cols()-1); + A.setRandom(); + A.col(j).setZero(); + VERIFY_IS_EQUAL( std::find_if(A.colwise().begin(), A.colwise().end(), [](typename ColMatrixType::ColXpr x) { return x.squaredNorm() == Scalar(0); })-A.colwise().begin(), j ); + VERIFY_IS_EQUAL( std::find_if(A.colwise().rbegin(), A.colwise().rend(), [](typename ColMatrixType::ColXpr x) { return x.squaredNorm() == Scalar(0); })-A.colwise().rbegin(), (A.cols()-1) - j ); + } + + { + using VecOp = VectorwiseOp<ArrayXXi, 0>; + STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::declval<const VecOp&>().cbegin())>::value )); + STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::declval<const VecOp&>().cend ())>::value )); + #if EIGEN_COMP_CXXVER>=14 + STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cbegin(std::declval<const VecOp&>()))>::value )); + STATIC_CHECK(( 
internal::is_same<VecOp::const_iterator, decltype(std::cend (std::declval<const VecOp&>()))>::value )); + #endif + } + +#endif +} + + +#if EIGEN_HAS_CXX11 +// When the compiler sees expression IsContainerTest<C>(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. +template <class C, + class Iterator = decltype(::std::declval<const C&>().begin()), + class = decltype(::std::declval<const C&>().end()), + class = decltype(++::std::declval<Iterator&>()), + class = decltype(*::std::declval<Iterator>()), + class = typename C::const_iterator> +bool IsContainerType(int /* dummy */) { return true; } + +template <class C> +bool IsContainerType(long /* dummy */) { return false; } + +template <typename Scalar, int Rows, int Cols> +void test_stl_container_detection(int rows=Rows, int cols=Cols) +{ + typedef Matrix<Scalar,Rows,1> VectorType; + typedef Matrix<Scalar,Rows,Cols,ColMajor> ColMatrixType; + typedef Matrix<Scalar,Rows,Cols,RowMajor> RowMatrixType; + + ColMatrixType A = ColMatrixType::Random(rows, cols); + RowMatrixType B = RowMatrixType::Random(rows, cols); + + Index i = 1; + + using ColMatrixColType = decltype(A.col(i)); + using ColMatrixRowType = decltype(A.row(i)); + using RowMatrixColType = decltype(B.col(i)); + using RowMatrixRowType = decltype(B.row(i)); + + // Vector and matrix col/row are valid Stl-style container. 
+ VERIFY_IS_EQUAL(IsContainerType<VectorType>(0), true); + VERIFY_IS_EQUAL(IsContainerType<ColMatrixColType>(0), true); + VERIFY_IS_EQUAL(IsContainerType<ColMatrixRowType>(0), true); + VERIFY_IS_EQUAL(IsContainerType<RowMatrixColType>(0), true); + VERIFY_IS_EQUAL(IsContainerType<RowMatrixRowType>(0), true); + + // But the matrix itself is not a valid Stl-style container. + VERIFY_IS_EQUAL(IsContainerType<ColMatrixType>(0), rows == 1 || cols == 1); + VERIFY_IS_EQUAL(IsContainerType<RowMatrixType>(0), rows == 1 || cols == 1); +} +#endif + +EIGEN_DECLARE_TEST(stl_iterators) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(( test_stl_iterators<double,2,3>() )); + CALL_SUBTEST_1(( test_stl_iterators<float,7,5>() )); + CALL_SUBTEST_1(( test_stl_iterators<int,Dynamic,Dynamic>(internal::random<int>(5,10), internal::random<int>(5,10)) )); + CALL_SUBTEST_1(( test_stl_iterators<int,Dynamic,Dynamic>(internal::random<int>(10,200), internal::random<int>(10,200)) )); + } + +#if EIGEN_HAS_CXX11 + CALL_SUBTEST_1(( test_stl_container_detection<float,1,1>() )); + CALL_SUBTEST_1(( test_stl_container_detection<float,5,5>() )); +#endif +} diff --git a/test/superlu_support.cpp b/test/superlu_support.cpp index 98a7bc5c8..55450c868 100644 --- a/test/superlu_support.cpp +++ b/test/superlu_support.cpp @@ -12,7 +12,7 @@ #include <Eigen/SuperLUSupport> -void test_superlu_support() +EIGEN_DECLARE_TEST(superlu_support) { SuperLU<SparseMatrix<double> > superlu_double_colmajor; SuperLU<SparseMatrix<std::complex<double> > > superlu_cplxdouble_colmajor; diff --git a/test/svd_common.h b/test/svd_common.h index 605d5dfef..eae4c0bfe 100644 --- a/test/svd_common.h +++ b/test/svd_common.h @@ -17,13 +17,13 @@ #endif #include "svd_fill.h" +#include "solverbase.h" // Check that the matrix m is properly reconstructed and that the U and V factors are unitary // The SVD must have already been computed. 
template<typename SvdType, typename MatrixType> void svd_check_full(const MatrixType& m, const SvdType& svd) { - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); @@ -101,7 +101,6 @@ void svd_least_square(const MatrixType& m, unsigned int computationOptions) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); @@ -168,7 +167,6 @@ template<typename MatrixType> void svd_min_norm(const MatrixType& m, unsigned int computationOptions) { typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; Index cols = m.cols(); enum { @@ -222,12 +220,33 @@ void svd_min_norm(const MatrixType& m, unsigned int computationOptions) VERIFY_IS_APPROX(x21, x3); } +template<typename MatrixType, typename SolverType> +void svd_test_solvers(const MatrixType& m, const SolverType& solver) { + Index rows, cols, cols2; + + rows = m.rows(); + cols = m.cols(); + + if(MatrixType::ColsAtCompileTime==Dynamic) + { + cols2 = internal::random<int>(2,EIGEN_TEST_MAX_SIZE); + } + else + { + cols2 = cols; + } + typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> CMatrixType; + check_solverbase<CMatrixType, MatrixType>(m, solver, rows, cols, cols2); +} + // Check full, compare_to_full, least_square, and min_norm for all possible compute-options template<typename SvdType, typename MatrixType> void svd_test_all_computation_options(const MatrixType& m, bool full_only) { // if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols()) // return; + STATIC_CHECK(( internal::is_same<typename SvdType::StorageIndex,int>::value )); + SvdType fullSvd(m, ComputeFullU|ComputeFullV); CALL_SUBTEST(( svd_check_full(m, fullSvd) )); CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeFullU | ComputeFullV) )); @@ -237,6 +256,9 @@ void 
svd_test_all_computation_options(const MatrixType& m, bool full_only) // remark #111: statement is unreachable #pragma warning disable 111 #endif + + svd_test_solvers(m, fullSvd); + if(full_only) return; @@ -261,7 +283,6 @@ void svd_test_all_computation_options(const MatrixType& m, bool full_only) CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeThinV) )); // test reconstruction - typedef typename MatrixType::Index Index; Index diagSize = (std::min)(m.rows(), m.cols()); SvdType svd(m, ComputeThinU | ComputeThinV); VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint()); @@ -277,7 +298,8 @@ EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); } // workaround aggressive optimization in ICC template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; } -// all this function does is verify we don't iterate infinitely on nan/inf values +// This function verifies we don't iterate infinitely on nan/inf values, +// and that info() returns InvalidInput. 
template<typename SvdType, typename MatrixType> void svd_inf_nan() { @@ -286,18 +308,22 @@ void svd_inf_nan() Scalar some_inf = Scalar(1) / zero<Scalar>(); VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf)); svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV); + VERIFY(svd.info() == InvalidInput); Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); VERIFY(nan != nan); svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV); + VERIFY(svd.info() == InvalidInput); MatrixType m = MatrixType::Zero(10,10); m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf; svd.compute(m, ComputeFullU | ComputeFullV); + VERIFY(svd.info() == InvalidInput); m = MatrixType::Zero(10,10); m(internal::random<int>(0,9), internal::random<int>(0,9)) = nan; svd.compute(m, ComputeFullU | ComputeFullV); + VERIFY(svd.info() == InvalidInput); // regression test for bug 791 m.resize(3,3); @@ -305,6 +331,7 @@ void svd_inf_nan() 0, -0.5, 0, nan, 0, 0; svd.compute(m, ComputeFullU | ComputeFullV); + VERIFY(svd.info() == InvalidInput); m.resize(4,4); m << 1, 0, 0, 0, @@ -312,6 +339,7 @@ void svd_inf_nan() 1, 0, 1, nan, 0, nan, nan, 0; svd.compute(m, ComputeFullU | ComputeFullV); + VERIFY(svd.info() == InvalidInput); } // Regression test for bug 286: JacobiSVD loops indefinitely with some @@ -434,10 +462,9 @@ void svd_preallocate() } template<typename SvdType,typename MatrixType> -void svd_verify_assert(const MatrixType& m) +void svd_verify_assert(const MatrixType& m, bool fullOnly = false) { typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); @@ -453,6 +480,8 @@ void svd_verify_assert(const MatrixType& m) VERIFY_RAISES_ASSERT(svd.singularValues()) VERIFY_RAISES_ASSERT(svd.matrixV()) VERIFY_RAISES_ASSERT(svd.solve(rhs)) + VERIFY_RAISES_ASSERT(svd.transpose().solve(rhs)) + VERIFY_RAISES_ASSERT(svd.adjoint().solve(rhs)) MatrixType a = MatrixType::Zero(rows, 
cols); a.setZero(); svd.compute(a, 0); @@ -460,8 +489,17 @@ void svd_verify_assert(const MatrixType& m) VERIFY_RAISES_ASSERT(svd.matrixV()) svd.singularValues(); VERIFY_RAISES_ASSERT(svd.solve(rhs)) - - if (ColsAtCompileTime == Dynamic) + + svd.compute(a, ComputeFullU); + svd.matrixU(); + VERIFY_RAISES_ASSERT(svd.matrixV()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) + svd.compute(a, ComputeFullV); + svd.matrixV(); + VERIFY_RAISES_ASSERT(svd.matrixU()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) + + if (!fullOnly && ColsAtCompileTime == Dynamic) { svd.compute(a, ComputeThinU); svd.matrixU(); diff --git a/test/svd_fill.h b/test/svd_fill.h index 3877c0c7e..d68647e99 100644 --- a/test/svd_fill.h +++ b/test/svd_fill.h @@ -23,7 +23,6 @@ void svd_fill_random(MatrixType &m, int Option = 0) using std::pow; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; Index diagSize = (std::min)(m.rows(), m.cols()); RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4; s = internal::random<RealScalar>(1,s); diff --git a/test/swap.cpp b/test/swap.cpp index f76e3624d..5b259d3ec 100644 --- a/test/swap.cpp +++ b/test/swap.cpp @@ -28,8 +28,8 @@ template<typename MatrixType> void swap(const MatrixType& m) typedef typename MatrixType::Scalar Scalar; eigen_assert((!internal::is_same<MatrixType,OtherMatrixType>::value)); - typename MatrixType::Index rows = m.rows(); - typename MatrixType::Index cols = m.cols(); + Index rows = m.rows(); + Index cols = m.cols(); // construct 3 matrix guaranteed to be distinct MatrixType m1 = MatrixType::Random(rows,cols); @@ -83,7 +83,7 @@ template<typename MatrixType> void swap(const MatrixType& m) } } -void test_swap() +EIGEN_DECLARE_TEST(swap) { int s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_1( swap(Matrix3f()) ); // fixed size, no vectorization diff --git a/test/symbolic_index.cpp b/test/symbolic_index.cpp new file mode 100644 index 
000000000..b114cbb95 --- /dev/null +++ b/test/symbolic_index.cpp @@ -0,0 +1,84 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifdef EIGEN_TEST_PART_2 +#define EIGEN_MAX_CPP_VER 03 + +// see indexed_view.cpp +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) + #pragma GCC diagnostic ignored "-Wdeprecated" +#endif + +#endif + +#include "main.h" + +template<typename T1,typename T2> +bool is_same_symb(const T1& a, const T2& b, Index size) +{ + return a.eval(last=size-1) == b.eval(last=size-1); +} + +template<typename T> +void check_is_symbolic(const T&) { + STATIC_CHECK(( symbolic::is_symbolic<T>::value )) +} + +template<typename T> +void check_isnot_symbolic(const T&) { + STATIC_CHECK(( !symbolic::is_symbolic<T>::value )) +} + +#define VERIFY_EQ_INT(A,B) VERIFY_IS_APPROX(int(A),int(B)) + +void check_symbolic_index() +{ + check_is_symbolic(last); + check_is_symbolic(lastp1); + check_is_symbolic(last+1); + check_is_symbolic(last-lastp1); + check_is_symbolic(2*last-lastp1/2); + check_isnot_symbolic(fix<3>()); + + Index size=100; + + // First, let's check FixedInt arithmetic: + VERIFY( is_same_type( (fix<5>()-fix<3>())*fix<9>()/(-fix<3>()), fix<-(5-3)*9/3>() ) ); + VERIFY( is_same_type( (fix<5>()-fix<3>())*fix<9>()/fix<2>(), fix<(5-3)*9/2>() ) ); + VERIFY( is_same_type( fix<9>()/fix<2>(), fix<9/2>() ) ); + VERIFY( is_same_type( fix<9>()%fix<2>(), fix<9%2>() ) ); + VERIFY( is_same_type( fix<9>()&fix<2>(), fix<9&2>() ) ); + VERIFY( is_same_type( fix<9>()|fix<2>(), fix<9|2>() ) ); + VERIFY( is_same_type( fix<9>()/2, int(9/2) ) ); + + VERIFY( is_same_symb( lastp1-1, last, size) ); + VERIFY( is_same_symb( lastp1-fix<1>, 
last, size) ); + + VERIFY_IS_EQUAL( ( (last*5-2)/3 ).eval(last=size-1), ((size-1)*5-2)/3 ); + VERIFY_IS_EQUAL( ( (last*fix<5>-fix<2>)/fix<3> ).eval(last=size-1), ((size-1)*5-2)/3 ); + VERIFY_IS_EQUAL( ( -last*lastp1 ).eval(last=size-1), -(size-1)*size ); + VERIFY_IS_EQUAL( ( lastp1-3*last ).eval(last=size-1), size- 3*(size-1) ); + VERIFY_IS_EQUAL( ( (lastp1-3*last)/lastp1 ).eval(last=size-1), (size- 3*(size-1))/size ); + +#if EIGEN_HAS_CXX14 + { + struct x_tag {}; static const symbolic::SymbolExpr<x_tag> x; + struct y_tag {}; static const symbolic::SymbolExpr<y_tag> y; + struct z_tag {}; static const symbolic::SymbolExpr<z_tag> z; + + VERIFY_IS_APPROX( int(((x+3)/y+z).eval(x=6,y=3,z=-13)), (6+3)/3+(-13) ); + } +#endif +} + +EIGEN_DECLARE_TEST(symbolic_index) +{ + CALL_SUBTEST_1( check_symbolic_index() ); + CALL_SUBTEST_2( check_symbolic_index() ); +} diff --git a/test/triangular.cpp b/test/triangular.cpp index b96856486..981a0d071 100644 --- a/test/triangular.cpp +++ b/test/triangular.cpp @@ -7,9 +7,35 @@ // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+#ifdef EIGEN_TEST_PART_100 +# define EIGEN_NO_DEPRECATED_WARNING +#endif + #include "main.h" +template<typename MatrixType> void triangular_deprecated(const MatrixType &m) +{ + Index rows = m.rows(); + Index cols = m.cols(); + MatrixType m1, m2, m3, m4; + m1.setRandom(rows,cols); + m2.setRandom(rows,cols); + m3 = m1; m4 = m2; + // deprecated method: + m1.template triangularView<Eigen::Upper>().swap(m2); + // use this method instead: + m3.template triangularView<Eigen::Upper>().swap(m4.template triangularView<Eigen::Upper>()); + VERIFY_IS_APPROX(m1,m3); + VERIFY_IS_APPROX(m2,m4); + // deprecated method: + m1.template triangularView<Eigen::Lower>().swap(m4); + // use this method instead: + m3.template triangularView<Eigen::Lower>().swap(m2.template triangularView<Eigen::Lower>()); + VERIFY_IS_APPROX(m1,m3); + VERIFY_IS_APPROX(m2,m4); +} + template<typename MatrixType> void triangular_square(const MatrixType& m) { @@ -19,8 +45,8 @@ template<typename MatrixType> void triangular_square(const MatrixType& m) RealScalar largerEps = 10*test_precision<RealScalar>(); - typename MatrixType::Index rows = m.rows(); - typename MatrixType::Index cols = m.cols(); + Index rows = m.rows(); + Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), @@ -68,7 +94,7 @@ template<typename MatrixType> void triangular_square(const MatrixType& m) while (numext::abs2(m1(i,i))<RealScalar(1e-1)) m1(i,i) = internal::random<Scalar>(); Transpose<MatrixType> trm4(m4); - // test back and forward subsitution with a vector as the rhs + // test back and forward substitution with a vector as the rhs m3 = m1.template triangularView<Upper>(); VERIFY(v2.isApprox(m3.adjoint() * (m1.adjoint().template triangularView<Lower>().solve(v2)), largerEps)); m3 = m1.template triangularView<Lower>(); @@ -109,11 +135,12 @@ template<typename MatrixType> void triangular_square(const MatrixType& m) // test swap m1.setOnes(); m2.setZero(); - m2.template 
triangularView<Upper>().swap(m1); + m2.template triangularView<Upper>().swap(m1.template triangularView<Eigen::Upper>()); m3.setZero(); m3.template triangularView<Upper>().setOnes(); VERIFY_IS_APPROX(m2,m3); - + VERIFY_RAISES_STATIC_ASSERT(m1.template triangularView<Eigen::Lower>().swap(m2.template triangularView<Eigen::Upper>())); + m1.setRandom(); m3 = m1.template triangularView<Upper>(); Matrix<Scalar, MatrixType::ColsAtCompileTime, Dynamic> m5(cols, internal::random<int>(1,20)); m5.setRandom(); @@ -129,12 +156,27 @@ template<typename MatrixType> void triangular_square(const MatrixType& m) VERIFY_IS_APPROX(m1.template selfadjointView<Upper>().diagonal(), m1.diagonal()); + m3.setRandom(); + const MatrixType& m3c(m3); + VERIFY( is_same_type(m3c.template triangularView<Lower>(),m3.template triangularView<Lower>().template conjugateIf<false>()) ); + VERIFY( is_same_type(m3c.template triangularView<Lower>().conjugate(),m3.template triangularView<Lower>().template conjugateIf<true>()) ); + VERIFY_IS_APPROX(m3.template triangularView<Lower>().template conjugateIf<true>().toDenseMatrix(), + m3.conjugate().template triangularView<Lower>().toDenseMatrix()); + VERIFY_IS_APPROX(m3.template triangularView<Lower>().template conjugateIf<false>().toDenseMatrix(), + m3.template triangularView<Lower>().toDenseMatrix()); + + VERIFY( is_same_type(m3c.template selfadjointView<Lower>(),m3.template selfadjointView<Lower>().template conjugateIf<false>()) ); + VERIFY( is_same_type(m3c.template selfadjointView<Lower>().conjugate(),m3.template selfadjointView<Lower>().template conjugateIf<true>()) ); + VERIFY_IS_APPROX(m3.template selfadjointView<Lower>().template conjugateIf<true>().toDenseMatrix(), + m3.conjugate().template selfadjointView<Lower>().toDenseMatrix()); + VERIFY_IS_APPROX(m3.template selfadjointView<Lower>().template conjugateIf<false>().toDenseMatrix(), + m3.template selfadjointView<Lower>().toDenseMatrix()); + } template<typename MatrixType> void triangular_rect(const 
MatrixType& m) { - typedef const typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime }; @@ -209,7 +251,7 @@ template<typename MatrixType> void triangular_rect(const MatrixType& m) // test swap m1.setOnes(); m2.setZero(); - m2.template triangularView<Upper>().swap(m1); + m2.template triangularView<Upper>().swap(m1.template triangularView<Eigen::Upper>()); m3.setZero(); m3.template triangularView<Upper>().setOnes(); VERIFY_IS_APPROX(m2,m3); @@ -221,7 +263,7 @@ void bug_159() EIGEN_UNUSED_VARIABLE(m) } -void test_triangular() +EIGEN_DECLARE_TEST(triangular) { int maxsize = (std::min)(EIGEN_TEST_MAX_SIZE,20); for(int i = 0; i < g_repeat ; i++) @@ -241,6 +283,9 @@ void test_triangular() CALL_SUBTEST_9( triangular_rect(MatrixXcf(r, c)) ); CALL_SUBTEST_5( triangular_rect(MatrixXcd(r, c)) ); CALL_SUBTEST_6( triangular_rect(Matrix<float,Dynamic,Dynamic,RowMajor>(r, c)) ); + + CALL_SUBTEST_100( triangular_deprecated(Matrix<float, 5, 7>()) ); + CALL_SUBTEST_100( triangular_deprecated(MatrixXd(r,c)) ); } CALL_SUBTEST_1( bug_159() ); diff --git a/test/type_alias.cpp b/test/type_alias.cpp new file mode 100644 index 000000000..9a6616c72 --- /dev/null +++ b/test/type_alias.cpp @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 Gael Guennebaud <gael.guennebaud@inria.fr> +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +EIGEN_DECLARE_TEST(type_alias) +{ + using namespace internal; + + // To warm up, some basic checks: + STATIC_CHECK((is_same<MatrixXd,Matrix<double,Dynamic,Dynamic> >::value)); + STATIC_CHECK((is_same<Matrix2f,Matrix<float,2,2> >::value)); + STATIC_CHECK((is_same<Array33i,Array<int,3,3> >::value)); + +#if EIGEN_HAS_CXX11 + + STATIC_CHECK((is_same<MatrixX<double>, MatrixXd>::value)); + STATIC_CHECK((is_same<MatrixX<int>, MatrixXi>::value)); + STATIC_CHECK((is_same<Matrix2<int>, Matrix2i>::value)); + STATIC_CHECK((is_same<Matrix2X<float>, Matrix2Xf>::value)); + STATIC_CHECK((is_same<MatrixX4<double>, MatrixX4d>::value)); + STATIC_CHECK((is_same<VectorX<int>, VectorXi>::value)); + STATIC_CHECK((is_same<Vector2<float>, Vector2f>::value)); + STATIC_CHECK((is_same<RowVectorX<int>, RowVectorXi>::value)); + STATIC_CHECK((is_same<RowVector2<float>, RowVector2f>::value)); + + STATIC_CHECK((is_same<ArrayXX<float>, ArrayXXf>::value)); + STATIC_CHECK((is_same<Array33<int>, Array33i>::value)); + STATIC_CHECK((is_same<Array2X<float>, Array2Xf>::value)); + STATIC_CHECK((is_same<ArrayX4<double>, ArrayX4d>::value)); + STATIC_CHECK((is_same<ArrayX<double>, ArrayXd>::value)); + STATIC_CHECK((is_same<Array4<double>, Array4d>::value)); + + STATIC_CHECK((is_same<Vector<float,3>, Vector3f>::value)); + STATIC_CHECK((is_same<Vector<int,Dynamic>, VectorXi>::value)); + STATIC_CHECK((is_same<RowVector<float,3>, RowVector3f>::value)); + STATIC_CHECK((is_same<RowVector<int,Dynamic>, RowVectorXi>::value)); + +#else + std::cerr << "WARNING: c++11 type aliases not tested.\n"; +#endif +} diff --git a/test/umeyama.cpp b/test/umeyama.cpp index 2e8092434..170c28a61 100644 --- a/test/umeyama.cpp +++ b/test/umeyama.cpp @@ -27,7 +27,7 @@ Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> randMatrixUnitary(int size) MatrixType Q; int max_tries = 40; - double is_unitary = false; + bool is_unitary = false; while (!is_unitary && max_tries > 0) { @@ -155,7 +155,7 @@ void 
run_fixed_size_test(int num_elements) VERIFY(error < Scalar(16)*std::numeric_limits<Scalar>::epsilon()); } -void test_umeyama() +EIGEN_DECLARE_TEST(umeyama) { for (int i=0; i<g_repeat; ++i) { diff --git a/test/umfpack_support.cpp b/test/umfpack_support.cpp index 37ab11f0b..d8f2a6f80 100644 --- a/test/umfpack_support.cpp +++ b/test/umfpack_support.cpp @@ -12,10 +12,10 @@ #include <Eigen/UmfPackSupport> -template<typename T> void test_umfpack_support_T() +template<typename T1, typename T2> void test_umfpack_support_T() { - UmfPackLU<SparseMatrix<T, ColMajor> > umfpack_colmajor; - UmfPackLU<SparseMatrix<T, RowMajor> > umfpack_rowmajor; + UmfPackLU<SparseMatrix<T1, ColMajor, T2> > umfpack_colmajor; + UmfPackLU<SparseMatrix<T1, RowMajor, T2> > umfpack_rowmajor; check_sparse_square_solving(umfpack_colmajor); check_sparse_square_solving(umfpack_rowmajor); @@ -24,9 +24,11 @@ template<typename T> void test_umfpack_support_T() check_sparse_square_determinant(umfpack_rowmajor); } -void test_umfpack_support() +EIGEN_DECLARE_TEST(umfpack_support) { - CALL_SUBTEST_1(test_umfpack_support_T<double>()); - CALL_SUBTEST_2(test_umfpack_support_T<std::complex<double> >()); + CALL_SUBTEST_1((test_umfpack_support_T<double, int>())); + CALL_SUBTEST_2((test_umfpack_support_T<std::complex<double>, int>())); + CALL_SUBTEST_3((test_umfpack_support_T<double, long >())); + CALL_SUBTEST_4((test_umfpack_support_T<std::complex<double>, long>())); } diff --git a/test/unalignedassert.cpp b/test/unalignedassert.cpp deleted file mode 100644 index 731a08977..000000000 --- a/test/unalignedassert.cpp +++ /dev/null @@ -1,180 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com> -// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr> -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. 
If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#if defined(EIGEN_TEST_PART_1) - // default -#elif defined(EIGEN_TEST_PART_2) - #define EIGEN_MAX_STATIC_ALIGN_BYTES 16 - #define EIGEN_MAX_ALIGN_BYTES 16 -#elif defined(EIGEN_TEST_PART_3) - #define EIGEN_MAX_STATIC_ALIGN_BYTES 32 - #define EIGEN_MAX_ALIGN_BYTES 32 -#elif defined(EIGEN_TEST_PART_4) - #define EIGEN_MAX_STATIC_ALIGN_BYTES 64 - #define EIGEN_MAX_ALIGN_BYTES 64 -#endif - -#include "main.h" - -typedef Matrix<float, 6,1> Vector6f; -typedef Matrix<float, 8,1> Vector8f; -typedef Matrix<float, 12,1> Vector12f; - -typedef Matrix<double, 5,1> Vector5d; -typedef Matrix<double, 6,1> Vector6d; -typedef Matrix<double, 7,1> Vector7d; -typedef Matrix<double, 8,1> Vector8d; -typedef Matrix<double, 9,1> Vector9d; -typedef Matrix<double,10,1> Vector10d; -typedef Matrix<double,12,1> Vector12d; - -struct TestNew1 -{ - MatrixXd m; // good: m will allocate its own array, taking care of alignment. 
- TestNew1() : m(20,20) {} -}; - -struct TestNew2 -{ - Matrix3d m; // good: m's size isn't a multiple of 16 bytes, so m doesn't have to be 16-byte aligned, - // 8-byte alignment is good enough here, which we'll get automatically -}; - -struct TestNew3 -{ - Vector2f m; // good: m's size isn't a multiple of 16 bytes, so m doesn't have to be 16-byte aligned -}; - -struct TestNew4 -{ - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - Vector2d m; - float f; // make the struct have sizeof%16!=0 to make it a little more tricky when we allow an array of 2 such objects -}; - -struct TestNew5 -{ - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - float f; // try the f at first -- the EIGEN_ALIGN_MAX attribute of m should make that still work - Matrix4f m; -}; - -struct TestNew6 -{ - Matrix<float,2,2,DontAlign> m; // good: no alignment requested - float f; -}; - -template<bool Align> struct Depends -{ - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Align) - Vector2d m; - float f; -}; - -template<typename T> -void check_unalignedassert_good() -{ - T *x, *y; - x = new T; - delete x; - y = new T[2]; - delete[] y; -} - -#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 -template<typename T> -void construct_at_boundary(int boundary) -{ - char buf[sizeof(T)+256]; - size_t _buf = reinterpret_cast<internal::UIntPtr>(buf); - _buf += (EIGEN_MAX_ALIGN_BYTES - (_buf % EIGEN_MAX_ALIGN_BYTES)); // make 16/32/...-byte aligned - _buf += boundary; // make exact boundary-aligned - T *x = ::new(reinterpret_cast<void*>(_buf)) T; - x[0].setZero(); // just in order to silence warnings - x->~T(); -} -#endif - -void unalignedassert() -{ -#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 - construct_at_boundary<Vector2f>(4); - construct_at_boundary<Vector3f>(4); - construct_at_boundary<Vector4f>(16); - construct_at_boundary<Vector6f>(4); - construct_at_boundary<Vector8f>(EIGEN_MAX_ALIGN_BYTES); - construct_at_boundary<Vector12f>(16); - construct_at_boundary<Matrix2f>(16); - construct_at_boundary<Matrix3f>(4); - construct_at_boundary<Matrix4f>(EIGEN_MAX_ALIGN_BYTES); - - 
construct_at_boundary<Vector2d>(16); - construct_at_boundary<Vector3d>(4); - construct_at_boundary<Vector4d>(EIGEN_MAX_ALIGN_BYTES); - construct_at_boundary<Vector5d>(4); - construct_at_boundary<Vector6d>(16); - construct_at_boundary<Vector7d>(4); - construct_at_boundary<Vector8d>(EIGEN_MAX_ALIGN_BYTES); - construct_at_boundary<Vector9d>(4); - construct_at_boundary<Vector10d>(16); - construct_at_boundary<Vector12d>(EIGEN_MAX_ALIGN_BYTES); - construct_at_boundary<Matrix2d>(EIGEN_MAX_ALIGN_BYTES); - construct_at_boundary<Matrix3d>(4); - construct_at_boundary<Matrix4d>(EIGEN_MAX_ALIGN_BYTES); - - construct_at_boundary<Vector2cf>(16); - construct_at_boundary<Vector3cf>(4); - construct_at_boundary<Vector2cd>(EIGEN_MAX_ALIGN_BYTES); - construct_at_boundary<Vector3cd>(16); -#endif - - check_unalignedassert_good<TestNew1>(); - check_unalignedassert_good<TestNew2>(); - check_unalignedassert_good<TestNew3>(); - - check_unalignedassert_good<TestNew4>(); - check_unalignedassert_good<TestNew5>(); - check_unalignedassert_good<TestNew6>(); - check_unalignedassert_good<Depends<true> >(); - -#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 - if(EIGEN_MAX_ALIGN_BYTES>=16) - { - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4f>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector8f>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector12f>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector2d>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4d>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector6d>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector8d>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector10d>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector12d>(8)); - // Complexes are disabled because the compiler might aggressively vectorize - // the initialization of complex coeffs to 0 before we can check for alignedness - //VERIFY_RAISES_ASSERT(construct_at_boundary<Vector2cf>(8)); - VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4i>(8)); - } - 
for(int b=8; b<EIGEN_MAX_ALIGN_BYTES; b+=8) - { - if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Vector8f>(b)); - if(b<64) VERIFY_RAISES_ASSERT(construct_at_boundary<Matrix4f>(b)); - if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4d>(b)); - if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Matrix2d>(b)); - if(b<128) VERIFY_RAISES_ASSERT(construct_at_boundary<Matrix4d>(b)); - //if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Vector2cd>(b)); - } -#endif -} - -void test_unalignedassert() -{ - CALL_SUBTEST(unalignedassert()); -} diff --git a/test/unalignedcount.cpp b/test/unalignedcount.cpp index d6ffeafdf..52cdd9e1d 100644 --- a/test/unalignedcount.cpp +++ b/test/unalignedcount.cpp @@ -28,9 +28,16 @@ static int nb_storeu; #include "main.h" -void test_unalignedcount() +EIGEN_DECLARE_TEST(unalignedcount) { - #if defined(EIGEN_VECTORIZE_AVX) + #if defined(EIGEN_VECTORIZE_AVX512) + VectorXf a(48), b(48); + VERIFY_ALIGNED_UNALIGNED_COUNT(a += b, 6, 0, 3, 0); + VERIFY_ALIGNED_UNALIGNED_COUNT(a.segment(0,48) += b.segment(0,48), 3, 3, 3, 0); + VERIFY_ALIGNED_UNALIGNED_COUNT(a.segment(0,48) -= b.segment(0,48), 3, 3, 3, 0); + VERIFY_ALIGNED_UNALIGNED_COUNT(a.segment(0,48) *= 3.5, 3, 0, 3, 0); + VERIFY_ALIGNED_UNALIGNED_COUNT(a.segment(0,48) /= 3.5, 3, 0, 3, 0); + #elif defined(EIGEN_VECTORIZE_AVX) VectorXf a(40), b(40); VERIFY_ALIGNED_UNALIGNED_COUNT(a += b, 10, 0, 5, 0); VERIFY_ALIGNED_UNALIGNED_COUNT(a.segment(0,40) += b.segment(0,40), 5, 5, 5, 0); diff --git a/test/upperbidiagonalization.cpp b/test/upperbidiagonalization.cpp index 847b34b55..945c99959 100644 --- a/test/upperbidiagonalization.cpp +++ b/test/upperbidiagonalization.cpp @@ -12,8 +12,8 @@ template<typename MatrixType> void upperbidiag(const MatrixType& m) { - const typename MatrixType::Index rows = m.rows(); - const typename MatrixType::Index cols = m.cols(); + const Index rows = m.rows(); + const Index cols = m.cols(); typedef Matrix<typename MatrixType::RealScalar, 
MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> RealMatrixType; typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime> TransposeMatrixType; @@ -29,7 +29,7 @@ template<typename MatrixType> void upperbidiag(const MatrixType& m) VERIFY_IS_APPROX(a.adjoint(),d); } -void test_upperbidiagonalization() +EIGEN_DECLARE_TEST(upperbidiagonalization) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( upperbidiag(MatrixXf(3,3)) ); diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp index 83c1439ad..97c0bdad9 100644 --- a/test/vectorization_logic.cpp +++ b/test/vectorization_logic.cpp @@ -22,6 +22,14 @@ #include "main.h" #include <typeinfo> +// Disable "ignoring attributes on template argument" +// for packet_traits<Packet*> +// => The only workaround would be to wrap _m128 and the likes +// within wrappers. +#if EIGEN_GNUC_AT_LEAST(6,0) + #pragma GCC diagnostic ignored "-Wignored-attributes" +#endif + using internal::demangle_flags; using internal::demangle_traversal; using internal::demangle_unrolling; @@ -29,6 +37,7 @@ using internal::demangle_unrolling; template<typename Dst, typename Src> bool test_assign(const Dst&, const Src&, int traversal, int unrolling) { + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src); typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar,typename Src::Scalar> > traits; bool res = traits::Traversal==traversal; if(unrolling==InnerUnrolling+CompleteUnrolling) @@ -53,6 +62,7 @@ bool test_assign(const Dst&, const Src&, int traversal, int unrolling) template<typename Dst, typename Src> bool test_assign(int traversal, int unrolling) { + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src); typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar,typename Src::Scalar> > traits; bool res = 
traits::Traversal==traversal && traits::Unrolling==unrolling; if(!res) @@ -109,26 +119,26 @@ struct vectorization_logic typedef Matrix<Scalar,Dynamic,1> VectorX; typedef Matrix<Scalar,Dynamic,Dynamic> MatrixXX; typedef Matrix<Scalar,PacketSize,PacketSize> Matrix11; - typedef Matrix<Scalar,2*PacketSize,2*PacketSize> Matrix22; + typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?8:2*PacketSize,(Matrix11::Flags&RowMajorBit)?2*PacketSize:8> Matrix22; typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16> Matrix44; typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16,DontAlign|EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION> Matrix44u; typedef Matrix<Scalar,4*PacketSize,4*PacketSize,ColMajor> Matrix44c; typedef Matrix<Scalar,4*PacketSize,4*PacketSize,RowMajor> Matrix44r; typedef Matrix<Scalar, - (PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), - (PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1) + (PacketSize==16 ? 8 : PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 2 : PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1) > Matrix1; typedef Matrix<Scalar, - (PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), - (PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 8 : PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 2 : PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1), DontAlign|((Matrix1::Flags&RowMajorBit)?RowMajor:ColMajor)> Matrix1u; // this type is made such that it can only be vectorized when viewed as a linear 1D vector typedef Matrix<Scalar, - (PacketSize==8 ? 4 : PacketSize==4 ? 6 : PacketSize==2 ? 
((Matrix11::Flags&RowMajorBit)?2:3) : /*PacketSize==1 ?*/ 1), - (PacketSize==8 ? 6 : PacketSize==4 ? 2 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?3:2) : /*PacketSize==1 ?*/ 3) + (PacketSize==16 ? 4 : PacketSize==8 ? 4 : PacketSize==4 ? 6 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?2:3) : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 12 : PacketSize==8 ? 6 : PacketSize==4 ? 2 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?3:2) : /*PacketSize==1 ?*/ 3) > Matrix3; #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT @@ -141,14 +151,6 @@ struct vectorization_logic VERIFY(test_assign(Vector1(),Vector1().template cast<Scalar>(), InnerVectorizedTraversal,CompleteUnrolling)); - - VERIFY(test_assign(Vector1(),Vector1(), - InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Vector1(),Vector1()+Vector1(), - InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Vector1(),Vector1().cwiseProduct(Vector1()), - InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Matrix44(),Matrix44()+Matrix44(), InnerVectorizedTraversal,InnerUnrolling)); @@ -157,11 +159,11 @@ struct vectorization_logic EIGEN_UNALIGNED_VECTORIZE ? InnerUnrolling : NoUnrolling)); VERIFY(test_assign(Matrix1(),Matrix1()+Matrix1(), - (Matrix1::InnerSizeAtCompileTime % PacketSize)==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal, + (int(Matrix1::InnerSizeAtCompileTime) % int(PacketSize))==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal, CompleteUnrolling)); VERIFY(test_assign(Matrix1u(),Matrix1()+Matrix1(), - EIGEN_UNALIGNED_VECTORIZE ? ((Matrix1::InnerSizeAtCompileTime % PacketSize)==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal) + EIGEN_UNALIGNED_VECTORIZE ? ((int(Matrix1::InnerSizeAtCompileTime) % int(PacketSize))==0 ? 
InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal, CompleteUnrolling)); VERIFY(test_assign(Matrix44c().col(1),Matrix44c().col(2)+Matrix44c().col(3), @@ -177,24 +179,25 @@ struct vectorization_logic VERIFY(test_assign(Matrix33c().row(2),Matrix33c().row(1)+Matrix33c().row(1), LinearTraversal,CompleteUnrolling)); VERIFY(test_assign(Vector3(),Vector3()+Vector3(), - EIGEN_UNALIGNED_VECTORIZE ? (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearTraversal), CompleteUnrolling)); + sizeof(Scalar)==16 ? InnerVectorizedTraversal : (EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : LinearTraversal), CompleteUnrolling)); VERIFY(test_assign(Matrix33c().col(0),Matrix33c().col(1)+Matrix33c().col(1), - EIGEN_UNALIGNED_VECTORIZE ? (HalfPacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : (HalfPacketSize==1 ? SliceVectorizedTraversal : LinearTraversal), - ((!EIGEN_UNALIGNED_VECTORIZE) && HalfPacketSize==1) ? NoUnrolling : CompleteUnrolling)); + EIGEN_UNALIGNED_VECTORIZE ? (sizeof(Scalar)==16 ? InnerVectorizedTraversal : LinearVectorizedTraversal) + : (sizeof(Scalar)==16 ? SliceVectorizedTraversal : LinearTraversal), + ((!EIGEN_UNALIGNED_VECTORIZE) && (sizeof(Scalar)==16)) ? NoUnrolling : CompleteUnrolling)); VERIFY(test_assign(Matrix3(),Matrix3().cwiseProduct(Matrix3()), LinearVectorizedTraversal,CompleteUnrolling)); VERIFY(test_assign(Matrix<Scalar,17,17>(),Matrix<Scalar,17,17>()+Matrix<Scalar,17,17>(), - HalfPacketSize==1 ? InnerVectorizedTraversal : + sizeof(Scalar)==16 ? InnerVectorizedTraversal : EIGEN_UNALIGNED_VECTORIZE ? 
LinearVectorizedTraversal : - LinearTraversal, + LinearTraversal, NoUnrolling)); VERIFY(test_assign(Matrix11(), Matrix11()+Matrix11(),InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Matrix11(),Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(2,3)+Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(8,4), + VERIFY(test_assign(Matrix11(),Matrix<Scalar,21,21>().template block<PacketSize,PacketSize>(2,3)+Matrix<Scalar,21,21>().template block<PacketSize,PacketSize>(3,2), (EIGEN_UNALIGNED_VECTORIZE) ? InnerVectorizedTraversal : DefaultTraversal, CompleteUnrolling|InnerUnrolling)); VERIFY(test_assign(Vector1(),Matrix11()*Vector1(), @@ -207,6 +210,12 @@ struct vectorization_logic VERIFY(test_redux(Vector1(), LinearVectorizedTraversal,CompleteUnrolling)); + VERIFY(test_redux(Vector1().array()*Vector1().array(), + LinearVectorizedTraversal,CompleteUnrolling)); + + VERIFY(test_redux((Vector1().array()*Vector1().array()).col(0), + LinearVectorizedTraversal,CompleteUnrolling)); + VERIFY(test_redux(Matrix<Scalar,PacketSize,3>(), LinearVectorizedTraversal,CompleteUnrolling)); @@ -216,8 +225,13 @@ struct vectorization_logic VERIFY(test_redux(Matrix44(), LinearVectorizedTraversal,NoUnrolling)); - VERIFY(test_redux(Matrix44().template block<(Matrix1::Flags&RowMajorBit)?4:PacketSize,(Matrix1::Flags&RowMajorBit)?PacketSize:4>(1,2), - DefaultTraversal,CompleteUnrolling)); + if(PacketSize>1) { + VERIFY(test_redux(Matrix44().template block<(Matrix1::Flags&RowMajorBit)?4:PacketSize,(Matrix1::Flags&RowMajorBit)?PacketSize:4>(1,2), + SliceVectorizedTraversal,CompleteUnrolling)); + + VERIFY(test_redux(Matrix44().template block<(Matrix1::Flags&RowMajorBit)?2:PacketSize,(Matrix1::Flags&RowMajorBit)?PacketSize:2>(1,2), + DefaultTraversal,CompleteUnrolling)); + } VERIFY(test_redux(Matrix44c().template block<2*PacketSize,1>(1,2), LinearVectorizedTraversal,CompleteUnrolling)); @@ -269,25 +283,21 @@ struct vectorization_logic_half typedef 
Matrix<Scalar,5*PacketSize,7,ColMajor> Matrix57; typedef Matrix<Scalar,3*PacketSize,5,ColMajor> Matrix35; typedef Matrix<Scalar,5*PacketSize,7,DontAlign|ColMajor> Matrix57u; -// typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16> Matrix44; -// typedef Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*PacketSize:16,DontAlign|EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION> Matrix44u; -// typedef Matrix<Scalar,4*PacketSize,4*PacketSize,ColMajor> Matrix44c; -// typedef Matrix<Scalar,4*PacketSize,4*PacketSize,RowMajor> Matrix44r; typedef Matrix<Scalar, - (PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), - (PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1) + (PacketSize==16 ? 8 : PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 2 : PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1) > Matrix1; typedef Matrix<Scalar, - (PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), - (PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 8 : PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 2 : PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1), DontAlign|((Matrix1::Flags&RowMajorBit)?RowMajor:ColMajor)> Matrix1u; // this type is made such that it can only be vectorized when viewed as a linear 1D vector typedef Matrix<Scalar, - (PacketSize==8 ? 4 : PacketSize==4 ? 6 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?2:3) : /*PacketSize==1 ?*/ 1), - (PacketSize==8 ? 6 : PacketSize==4 ? 2 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?3:2) : /*PacketSize==1 ?*/ 3) + (PacketSize==16 ? 4 : PacketSize==8 ? 4 : PacketSize==4 ? 6 : PacketSize==2 ? 
((Matrix11::Flags&RowMajorBit)?2:3) : /*PacketSize==1 ?*/ 1), + (PacketSize==16 ? 12 : PacketSize==8 ? 6 : PacketSize==4 ? 2 : PacketSize==2 ? ((Matrix11::Flags&RowMajorBit)?3:2) : /*PacketSize==1 ?*/ 3) > Matrix3; #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT @@ -306,14 +316,6 @@ struct vectorization_logic_half VERIFY(test_assign(Vector1(),Vector1().template cast<Scalar>(), InnerVectorizedTraversal,CompleteUnrolling)); - - VERIFY(test_assign(Vector1(),Vector1(), - InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Vector1(),Vector1()+Vector1(), - InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Vector1(),Vector1().cwiseProduct(Vector1()), - InnerVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_assign(Matrix57(),Matrix57()+Matrix57(), InnerVectorizedTraversal,InnerUnrolling)); @@ -322,7 +324,7 @@ struct vectorization_logic_half EIGEN_UNALIGNED_VECTORIZE ? InnerUnrolling : NoUnrolling)); VERIFY(test_assign(Matrix1u(),Matrix1()+Matrix1(), - EIGEN_UNALIGNED_VECTORIZE ? ((Matrix1::InnerSizeAtCompileTime % PacketSize)==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,CompleteUnrolling)); + EIGEN_UNALIGNED_VECTORIZE ? ((int(Matrix1::InnerSizeAtCompileTime) % int(PacketSize))==0 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,CompleteUnrolling)); if(PacketSize>1) { @@ -330,17 +332,20 @@ struct vectorization_logic_half VERIFY(test_assign(Matrix33c().row(2),Matrix33c().row(1)+Matrix33c().row(1), LinearTraversal,CompleteUnrolling)); VERIFY(test_assign(Matrix33c().col(0),Matrix33c().col(1)+Matrix33c().col(1), - EIGEN_UNALIGNED_VECTORIZE ? (PacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal,CompleteUnrolling)); + EIGEN_UNALIGNED_VECTORIZE ? (sizeof(Scalar)==16 ? InnerVectorizedTraversal : LinearVectorizedTraversal) + : (sizeof(Scalar)==16 ? SliceVectorizedTraversal : LinearTraversal), + ((!EIGEN_UNALIGNED_VECTORIZE) && (sizeof(Scalar)==16)) ? 
NoUnrolling : CompleteUnrolling)); VERIFY(test_assign(Matrix3(),Matrix3().cwiseQuotient(Matrix3()), PacketTraits::HasDiv ? LinearVectorizedTraversal : LinearTraversal,CompleteUnrolling)); VERIFY(test_assign(Matrix<Scalar,17,17>(),Matrix<Scalar,17,17>()+Matrix<Scalar,17,17>(), - EIGEN_UNALIGNED_VECTORIZE ? (PacketSize==1 ? InnerVectorizedTraversal : LinearVectorizedTraversal) : LinearTraversal, + sizeof(Scalar)==16 ? InnerVectorizedTraversal : (EIGEN_UNALIGNED_VECTORIZE ? LinearVectorizedTraversal : LinearTraversal), NoUnrolling)); VERIFY(test_assign(Matrix11(),Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(2,3)+Matrix<Scalar,17,17>().template block<PacketSize,PacketSize>(8,4), - EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : DefaultTraversal,PacketSize>4?InnerUnrolling:CompleteUnrolling)); + EIGEN_UNALIGNED_VECTORIZE ? InnerVectorizedTraversal : DefaultTraversal,InnerUnrolling+CompleteUnrolling)); + VERIFY(test_assign(Vector1(),Matrix11()*Vector1(), InnerVectorizedTraversal,CompleteUnrolling)); @@ -361,16 +366,21 @@ struct vectorization_logic_half VERIFY(test_redux(Matrix35(), LinearVectorizedTraversal,CompleteUnrolling)); - VERIFY(test_redux(Matrix57().template block<PacketSize,3>(1,0), - DefaultTraversal,CompleteUnrolling)); + VERIFY(test_redux(Matrix57().template block<PacketSize==1?2:PacketSize,3>(1,0), + SliceVectorizedTraversal,CompleteUnrolling)); + + if(PacketSize>1) { + VERIFY(test_redux(Matrix57().template block<PacketSize,2>(1,0), + DefaultTraversal,CompleteUnrolling)); + } VERIFY((test_assign< Map<Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >, Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)> - >(DefaultTraversal,CompleteUnrolling))); + >(DefaultTraversal,PacketSize>4?InnerUnrolling:CompleteUnrolling))); VERIFY((test_assign(Matrix57(), Matrix<Scalar,5*PacketSize,3>()*Matrix<Scalar,3,7>(), - InnerVectorizedTraversal, 
InnerUnrolling|CompleteUnrolling))); + InnerVectorizedTraversal, InnerUnrolling+CompleteUnrolling))); #endif } }; @@ -380,7 +390,7 @@ template<typename Scalar> struct vectorization_logic_half<Scalar,false> static void run() {} }; -void test_vectorization_logic() +EIGEN_DECLARE_TEST(vectorization_logic) { #ifdef EIGEN_VECTORIZE diff --git a/test/vectorwiseop.cpp b/test/vectorwiseop.cpp index f3ab561ee..8ee58841a 100644 --- a/test/vectorwiseop.cpp +++ b/test/vectorwiseop.cpp @@ -15,7 +15,6 @@ template<typename ArrayType> void vectorwiseop_array(const ArrayType& m) { - typedef typename ArrayType::Index Index; typedef typename ArrayType::Scalar Scalar; typedef Array<Scalar, ArrayType::RowsAtCompileTime, 1> ColVectorType; typedef Array<Scalar, 1, ArrayType::ColsAtCompileTime> RowVectorType; @@ -129,13 +128,13 @@ template<typename ArrayType> void vectorwiseop_array(const ArrayType& m) template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType; typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType; typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealColVectorType; typedef Matrix<RealScalar, 1, MatrixType::ColsAtCompileTime> RealRowVectorType; + typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX; Index rows = m.rows(); Index cols = m.cols(); @@ -151,6 +150,19 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m) RealColVectorType rcres; RealRowVectorType rrres; + // test broadcast assignment + m2 = m1; + m2.colwise() = colvec; + for(Index j=0; j<cols; ++j) + VERIFY_IS_APPROX(m2.col(j), colvec); + m2.rowwise() = rowvec; + for(Index i=0; i<rows; ++i) + VERIFY_IS_APPROX(m2.row(i), rowvec); + if(rows>1) + VERIFY_RAISES_ASSERT(m2.colwise() = colvec.transpose()); + if(cols>1) + 
VERIFY_RAISES_ASSERT(m2.rowwise() = rowvec.transpose()); + // test addition m2 = m1; @@ -199,11 +211,23 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m) VERIFY_RAISES_ASSERT(m1.rowwise() - rowvec.transpose()); } - // test norm - rrres = m1.colwise().norm(); - VERIFY_IS_APPROX(rrres(c), m1.col(c).norm()); - rcres = m1.rowwise().norm(); - VERIFY_IS_APPROX(rcres(r), m1.row(r).norm()); + // ------ partial reductions ------ + + #define TEST_PARTIAL_REDUX_BASIC(FUNC,ROW,COL,PREPROCESS) { \ + ROW = m1 PREPROCESS .colwise().FUNC ; \ + for(Index k=0; k<cols; ++k) VERIFY_IS_APPROX(ROW(k), m1.col(k) PREPROCESS .FUNC ); \ + COL = m1 PREPROCESS .rowwise().FUNC ; \ + for(Index k=0; k<rows; ++k) VERIFY_IS_APPROX(COL(k), m1.row(k) PREPROCESS .FUNC ); \ + } + + TEST_PARTIAL_REDUX_BASIC(sum(), rowvec,colvec,EIGEN_EMPTY); + TEST_PARTIAL_REDUX_BASIC(prod(), rowvec,colvec,EIGEN_EMPTY); + TEST_PARTIAL_REDUX_BASIC(mean(), rowvec,colvec,EIGEN_EMPTY); + TEST_PARTIAL_REDUX_BASIC(minCoeff(), rrres, rcres, .real()); + TEST_PARTIAL_REDUX_BASIC(maxCoeff(), rrres, rcres, .real()); + TEST_PARTIAL_REDUX_BASIC(norm(), rrres, rcres, EIGEN_EMPTY); + TEST_PARTIAL_REDUX_BASIC(squaredNorm(),rrres, rcres, EIGEN_EMPTY); + TEST_PARTIAL_REDUX_BASIC(redux(internal::scalar_sum_op<Scalar,Scalar>()),rowvec,colvec,EIGEN_EMPTY); VERIFY_IS_APPROX(m1.cwiseAbs().colwise().sum(), m1.colwise().template lpNorm<1>()); VERIFY_IS_APPROX(m1.cwiseAbs().rowwise().sum(), m1.rowwise().template lpNorm<1>()); @@ -237,14 +261,36 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m) m1 = m1.rowwise() - (m1.colwise().sum()/RealScalar(m1.rows())); VERIFY_IS_APPROX( m1, m2 ); VERIFY_EVALUATION_COUNT( m2 = (m1.rowwise() - m1.colwise().sum()/RealScalar(m1.rows())), (MatrixType::RowsAtCompileTime!=1 ? 
1 : 0) ); + + // test empty expressions + VERIFY_IS_APPROX(m1.matrix().middleCols(0,0).rowwise().sum().eval(), MatrixX::Zero(rows,1)); + VERIFY_IS_APPROX(m1.matrix().middleRows(0,0).colwise().sum().eval(), MatrixX::Zero(1,cols)); + VERIFY_IS_APPROX(m1.matrix().middleCols(0,fix<0>).rowwise().sum().eval(), MatrixX::Zero(rows,1)); + VERIFY_IS_APPROX(m1.matrix().middleRows(0,fix<0>).colwise().sum().eval(), MatrixX::Zero(1,cols)); + + VERIFY_IS_APPROX(m1.matrix().middleCols(0,0).rowwise().prod().eval(), MatrixX::Ones(rows,1)); + VERIFY_IS_APPROX(m1.matrix().middleRows(0,0).colwise().prod().eval(), MatrixX::Ones(1,cols)); + VERIFY_IS_APPROX(m1.matrix().middleCols(0,fix<0>).rowwise().prod().eval(), MatrixX::Ones(rows,1)); + VERIFY_IS_APPROX(m1.matrix().middleRows(0,fix<0>).colwise().prod().eval(), MatrixX::Ones(1,cols)); + + VERIFY_IS_APPROX(m1.matrix().middleCols(0,0).rowwise().squaredNorm().eval(), MatrixX::Zero(rows,1)); + + VERIFY_RAISES_ASSERT(m1.real().middleCols(0,0).rowwise().minCoeff().eval()); + VERIFY_RAISES_ASSERT(m1.real().middleRows(0,0).colwise().maxCoeff().eval()); + VERIFY_IS_EQUAL(m1.real().middleRows(0,0).rowwise().maxCoeff().eval().rows(),0); + VERIFY_IS_EQUAL(m1.real().middleCols(0,0).colwise().maxCoeff().eval().cols(),0); + VERIFY_IS_EQUAL(m1.real().middleRows(0,fix<0>).rowwise().maxCoeff().eval().rows(),0); + VERIFY_IS_EQUAL(m1.real().middleCols(0,fix<0>).colwise().maxCoeff().eval().cols(),0); } -void test_vectorwiseop() +EIGEN_DECLARE_TEST(vectorwiseop) { CALL_SUBTEST_1( vectorwiseop_array(Array22cd()) ); CALL_SUBTEST_2( vectorwiseop_array(Array<double, 3, 2>()) ); CALL_SUBTEST_3( vectorwiseop_array(ArrayXXf(3, 4)) ); CALL_SUBTEST_4( vectorwiseop_matrix(Matrix4cf()) ); + CALL_SUBTEST_5( vectorwiseop_matrix(Matrix4f()) ); + CALL_SUBTEST_5( vectorwiseop_matrix(Vector4f()) ); CALL_SUBTEST_5( vectorwiseop_matrix(Matrix<float,4,5>()) ); CALL_SUBTEST_6( vectorwiseop_matrix(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), 
internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_7( vectorwiseop_matrix(VectorXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) ); diff --git a/test/visitor.cpp b/test/visitor.cpp index 844170ec6..20fb2c3ed 100644 --- a/test/visitor.cpp +++ b/test/visitor.cpp @@ -12,7 +12,6 @@ template<typename MatrixType> void matrixVisitor(const MatrixType& p) { typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; Index rows = p.rows(); Index cols = p.cols(); @@ -57,15 +56,49 @@ template<typename MatrixType> void matrixVisitor(const MatrixType& p) VERIFY_IS_APPROX(maxc, m.maxCoeff()); eigen_maxc = (m.adjoint()*m).maxCoeff(&eigen_maxrow,&eigen_maxcol); - eigen_maxc = (m.adjoint()*m).eval().maxCoeff(&maxrow,&maxcol); - VERIFY(maxrow == eigen_maxrow); - VERIFY(maxcol == eigen_maxcol); + Index maxrow2=0,maxcol2=0; + eigen_maxc = (m.adjoint()*m).eval().maxCoeff(&maxrow2,&maxcol2); + VERIFY(maxrow2 == eigen_maxrow); + VERIFY(maxcol2 == eigen_maxcol); + + if (!NumTraits<Scalar>::IsInteger && m.size() > 2) { + // Test NaN propagation by replacing an element with NaN. 
+ bool stop = false; + for (Index j = 0; j < cols && !stop; ++j) { + for (Index i = 0; i < rows && !stop; ++i) { + if (!(j == mincol && i == minrow) && + !(j == maxcol && i == maxrow)) { + m(i,j) = NumTraits<Scalar>::quiet_NaN(); + stop = true; + break; + } + } + } + + eigen_minc = m.template minCoeff<PropagateNumbers>(&eigen_minrow, &eigen_mincol); + eigen_maxc = m.template maxCoeff<PropagateNumbers>(&eigen_maxrow, &eigen_maxcol); + VERIFY(minrow == eigen_minrow); + VERIFY(maxrow == eigen_maxrow); + VERIFY(mincol == eigen_mincol); + VERIFY(maxcol == eigen_maxcol); + VERIFY_IS_APPROX(minc, eigen_minc); + VERIFY_IS_APPROX(maxc, eigen_maxc); + VERIFY_IS_APPROX(minc, m.template minCoeff<PropagateNumbers>()); + VERIFY_IS_APPROX(maxc, m.template maxCoeff<PropagateNumbers>()); + + eigen_minc = m.template minCoeff<PropagateNaN>(&eigen_minrow, &eigen_mincol); + eigen_maxc = m.template maxCoeff<PropagateNaN>(&eigen_maxrow, &eigen_maxcol); + VERIFY(minrow != eigen_minrow || mincol != eigen_mincol); + VERIFY(maxrow != eigen_maxrow || maxcol != eigen_maxcol); + VERIFY((numext::isnan)(eigen_minc)); + VERIFY((numext::isnan)(eigen_maxc)); + } + } template<typename VectorType> void vectorVisitor(const VectorType& w) { typedef typename VectorType::Scalar Scalar; - typedef typename VectorType::Index Index; Index size = w.size(); @@ -113,9 +146,34 @@ template<typename VectorType> void vectorVisitor(const VectorType& w) v2.maxCoeff(&eigen_maxidx); VERIFY(eigen_minidx == (std::min)(idx0,idx1)); VERIFY(eigen_maxidx == (std::min)(idx0,idx2)); + + if (!NumTraits<Scalar>::IsInteger && size > 2) { + // Test NaN propagation by replacing an element with NaN. 
+ for (Index i = 0; i < size; ++i) { + if (i != minidx && i != maxidx) { + v(i) = NumTraits<Scalar>::quiet_NaN(); + break; + } + } + eigen_minc = v.template minCoeff<PropagateNumbers>(&eigen_minidx); + eigen_maxc = v.template maxCoeff<PropagateNumbers>(&eigen_maxidx); + VERIFY(minidx == eigen_minidx); + VERIFY(maxidx == eigen_maxidx); + VERIFY_IS_APPROX(minc, eigen_minc); + VERIFY_IS_APPROX(maxc, eigen_maxc); + VERIFY_IS_APPROX(minc, v.template minCoeff<PropagateNumbers>()); + VERIFY_IS_APPROX(maxc, v.template maxCoeff<PropagateNumbers>()); + + eigen_minc = v.template minCoeff<PropagateNaN>(&eigen_minidx); + eigen_maxc = v.template maxCoeff<PropagateNaN>(&eigen_maxidx); + VERIFY(minidx != eigen_minidx); + VERIFY(maxidx != eigen_maxidx); + VERIFY((numext::isnan)(eigen_minc)); + VERIFY((numext::isnan)(eigen_maxc)); + } } -void test_visitor() +EIGEN_DECLARE_TEST(visitor) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( matrixVisitor(Matrix<float, 1, 1>()) ); diff --git a/test/zerosized.cpp b/test/zerosized.cpp index 477ff0070..07afd0f86 100644 --- a/test/zerosized.cpp +++ b/test/zerosized.cpp @@ -16,9 +16,18 @@ template<typename MatrixType> void zeroReduction(const MatrixType& m) { VERIFY(!m.any()); VERIFY(m.prod()==1); VERIFY(m.sum()==0); + VERIFY(m.norm()==0); + VERIFY(m.squaredNorm()==0); VERIFY(m.count()==0); VERIFY(m.allFinite()); VERIFY(!m.hasNaN()); + VERIFY_RAISES_ASSERT( m.minCoeff() ); + VERIFY_RAISES_ASSERT( m.maxCoeff() ); + Index i,j; + VERIFY_RAISES_ASSERT( m.minCoeff(&i,&j) ); + VERIFY_RAISES_ASSERT( m.maxCoeff(&i,&j) ); + VERIFY_RAISES_ASSERT( m.reshaped().minCoeff(&i) ); + VERIFY_RAISES_ASSERT( m.reshaped().maxCoeff(&i) ); } @@ -81,7 +90,7 @@ template<typename VectorType> void zeroSizedVector() } } -void test_zerosized() +EIGEN_DECLARE_TEST(zerosized) { zeroSizedMatrix<Matrix2d>(); zeroSizedMatrix<Matrix3i>(); |